How to use the emcee.MHSampler function in emcee

To help you get started, we’ve selected a few emcee examples based on popular ways it is used in public projects.

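The examples below all target the emcee 2.x API, where emcee.MHSampler(cov, dim, lnprobfn) implements a plain Metropolis-Hastings sampler with a Gaussian proposal (the class was removed in emcee 3). As a reference point before the project snippets, here is a minimal sketch on a toy Gaussian log-probability; the proposal scale and chain length are illustrative choices, not values from any of the projects.

import numpy as np
import emcee

ndim = 2

def log_prob(theta):
    # Toy target: a standard 2-D Gaussian log-density (up to a constant).
    return -0.5 * np.sum(theta ** 2)

proposal_cov = 0.1 * np.eye(ndim)   # Gaussian proposal covariance (illustrative scale)
sampler = emcee.MHSampler(proposal_cov, ndim, log_prob)

p0 = np.zeros(ndim)                 # starting position
sampler.run_mcmc(p0, 1000)          # draw 1000 samples

print("acceptance fraction:", sampler.acceptance_fraction)
samples = sampler.flatchain         # shape (1000, ndim)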

github pyspeckit / pyspeckit / pyspeckit / spectrum / models / model.py
        >>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
        >>> sp.specfit(fittype='gaussian')
        >>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
        >>> p0 = sp.specfit.parinfo
        >>> emcee_sampler.run_mcmc(p0, 100)
        """
        try:
            import emcee
        except ImportError:
            return

        def probfunc(pars):
            return self.logp(xarr, data, error, pars=pars)

        # NOTE: the helper bails out here, so the MHSampler construction below is
        # never reached (and, as written, the call does not match MHSampler's
        # (cov, dim, lnprobfn) signature).
        raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
        sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)

        return sampler
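Since the helper above raises NotImplementedError before the sampler is ever built, here is a minimal, self-contained sketch of the same construction done by hand with the (cov, dim, lnprobfn) signature; the toy probfunc, the parameter count, and the 0.01 proposal scale are placeholder assumptions, not pyspeckit's values:

import numpy as np
import emcee

npars = 3   # stands in for self.npars * self.npeaks + self.vheight

def probfunc(pars):
    # Placeholder for the closure over self.logp in the snippet above.
    return -0.5 * np.sum(np.asarray(pars) ** 2)

proposal_cov = 0.01 * np.eye(npars)   # assumed per-parameter step sizes
sampler = emcee.MHSampler(proposal_cov, npars, probfunc)

guesses = np.zeros(npars)             # in pyspeckit this would come from the fit's parinfo
pos, lnprob, state = sampler.run_mcmc(guesses, 100)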
github iancze / PSOAP / scripts / psoap_sample_SB1.py
# Imports used by the snippet (the full script also relies on psoap's utils
# module, a `config` dict, starting `pars`, and an `lnprob` function defined
# elsewhere in the script)
import numpy as np
from emcee import MHSampler

# Determine how many parameters we will actually be fitting
# The difference between all of the parameters and the parameters we will be fixing
dim = len(utils.registered_params[config["model"]]) - len(config["fix_params"])

# Read in starting parameters
p0 = utils.convert_dict(config["model"], config["fix_params"], **pars)

try:
    cov = np.load("opt_jump.npy")
    print("using optimal jumps")
except IOError:
    print("using hand-specified jumps")
    cov = utils.convert_dict(config["model"], config["fix_params"], **config["jumps"])**2

sampler = MHSampler(cov, dim, lnprob)

for i, result in enumerate(sampler.sample(p0, iterations=config["samples"])):
    if (i+1) % 20 == 0:
        print("Iteration", i +1)

# Save the actual chain of samples
print("Acceptance fraction", sampler.acceptance_fraction)
np.save("lnprob.npy", sampler.lnprobability)
np.save("flatchain.npy", sampler.flatchain)
github dfm / emcee / examples / harlemcmc.py
def build_mixture(means, covs):
    # Appends to module-level lists (mixes, samplers, mh_samps, ics, mh_ics)
    # and uses the module-level nwalkers and ndim defined earlier in the script.
    m = Mixture(means, covs)
    mixes.append(m)
    samplers.append(emcee.EnsembleSampler(nwalkers, ndim, m))
    mh_samps.append(emcee.MHSampler(0.005 * np.array([[1, 0], [0, 1]]),
                                    ndim, m))
    ics.append(np.array([4 * np.random.rand(2) - 2 for n in range(nwalkers)]))
    mh_ics.append(means[np.random.randint(len(means))])
github iancze / PSOAP / scripts / psoap_sample_SB2.py
import numpy as np

# Import the Metropolis-Hastings sampler
from emcee import MHSampler

# The difference between all of the parameters and the parameters we will be fixing
dim = len(utils.registered_params[config["model"]]) - len(config["fix_params"])
p0 = utils.convert_dict(config["model"], config["fix_params"], **pars)

try:
    cov = np.load(config["opt_jump"])
    print("using optimal jumps")
except IOError:
    print("using hand-specified jumps")
    cov = utils.convert_dict(config["model"], config["fix_params"], **config["jumps"])**2

sampler = MHSampler(cov, dim, lnprob)

for i, result in enumerate(sampler.sample(p0, iterations=config["samples"])):
    if (i+1) % 20 == 0:
        print("Iteration", i +1)
        # print("{0:5.1%}".format(float(i) / nsteps))

# Save the actual chain of samples
print("Acceptance fraction", sampler.acceptance_fraction)
np.save("lnprob.npy", sampler.lnprobability)
np.save("flatchain.npy", sampler.flatchain)
github ronpandolfi / Xi-cam / xicam / plugins / cdsaxs / fitting.py
def get_sampler(a):
    # All arguments arrive packed in a single tuple and are unpacked here.
    walker_num, N, sigma, nsteps, residual, verbose = a
    cov = np.identity(N) * sigma ** 2
    sampler = emcee.MHSampler(cov.copy(), cov.shape[0], residual, args=[False, False])
    for i, _ in enumerate(sampler.sample(np.zeros(N), None, None, iterations=nsteps)):
        if verbose and (walker_num == 0):
            do_verbose(i, sampler)
    return sampler
github dfm / emcee / document / plots / oned.py
    ndim = int(np.ceil(2 ** (7 * _random.rand())))
    nwalkers = 2 * ndim + 2
    # nwalkers += nwalkers % 2
    print(ndim, nwalkers)

    cov = random_cov(ndim)
    icov = np.linalg.inv(cov)

    ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov])
    ens_samp.random_state = _random.get_state()
    pos, lnprob, state = ens_samp.run_mcmc(
        np.random.randn(nwalkers * ndim).reshape([nwalkers, ndim]), nsteps
    )

    proposal = np.diag(cov.diagonal())
    mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn, args=[icov])
    mh_samp.random_state = state
    mh_samp.run_mcmc(np.random.randn(ndim), nsteps)

    f = h5py.File(outfn)
    f["data"][i, :] = np.array(
        [ndim, np.mean(ens_samp.acor), np.mean(mh_samp.acor)]
    )
    f.close()
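All of the snippets on this page rely on the emcee 2.x MHSampler, which was removed in emcee 3. If the same Metropolis-Hastings behavior is needed against a current emcee, the closest equivalent (a sketch under that assumption, with an illustrative toy target) is an EnsembleSampler driven by a Gaussian move with the same proposal covariance:

import numpy as np
import emcee

ndim = 2
nwalkers = 8   # emcee 3 always runs an ensemble, even for Metropolis-style moves

def log_prob(theta):
    # Toy target: a standard Gaussian log-density (up to a constant).
    return -0.5 * np.sum(theta ** 2)

proposal_cov = 0.1 * np.eye(ndim)
move = emcee.moves.GaussianMove(proposal_cov)   # Metropolis step with a Gaussian proposal

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, moves=move)
p0 = np.random.randn(nwalkers, ndim)
sampler.run_mcmc(p0, 1000)

print("mean acceptance fraction:", sampler.acceptance_fraction.mean())
samples = sampler.get_chain(flat=True)          # shape (1000 * nwalkers, ndim)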