How to use the emcee.utils module in emcee

To help you get started, we've selected a few emcee.utils examples based on popular ways it is used in public projects.

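A minimal, self-contained sketch of the helper that appears throughout the examples below, emcee.utils.sample_ball, which draws a Gaussian "ball" of walker starting positions around a single point. The names here (log_prob, center, spread) are illustrative only, and in emcee 3 sample_ball is deprecated in favor of adding small Gaussian noise to a center point yourself:

import numpy as np
import emcee

# Toy log-probability: a standard Gaussian in ndim dimensions.
def log_prob(p):
    return -0.5 * np.sum(p ** 2)

ndim, nwalkers = 3, 32
center = np.zeros(ndim)        # e.g. a best-fit point
spread = 1e-3 * np.ones(ndim)  # per-parameter scatter

# sample_ball(p, std, size) returns a (size, ndim) array of
# starting positions clustered around `center`.
p0 = emcee.utils.sample_ball(center, spread, size=nwalkers)

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, 100)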

github lmfit / lmfit-py / examples / lmfit_and_emcee.py
def residuals(paras):
    a = paras['a'].value
    b = paras['b'].value
    t1 = paras['t1'].value
    t2 = paras['t2'].value
    return a * np.exp(-x/t1) + b * np.exp(-x/t2) - y

# Fit the data with lmfit.
mini = lmfit.Minimizer(residuals, params)
result = mini.leastsq()
lmfit.report_errors(result.params)

# Create lnfunc and starting distribution.
lnfunc, guess = create_all(result)
nwalkers, ndim = 30, len(guess)
p0 = emcee.utils.sample_ball(guess, 0.1*np.array(guess), nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnfunc)
steps = 500
sampler.run_mcmc(p0, steps)

if HASPYLAB:
    fig, axes = plt.subplots(5, 1, sharex=True, figsize=(8, 9))
    for (i, name, rv) in zip(range(5), list(params.keys()) + ['sigma'], [a, b, t1, t2, sigma]):
        axes[i].plot(sampler.chain[:, :, i].T, color="k", alpha=0.05)
        axes[i].yaxis.set_major_locator(plt.MaxNLocator(5))
        axes[i].axhline(rv, color="#888888", lw=2)
        axes[i].set_ylabel("$%s$" % name)
    axes[-1].set_xlabel("Steps")

    plt.figure()

    try:
github dstndstn / astrometry.net / blind / mctweak.py
def mctweak(wcs, xy, rd):
    obj = McTweak(wcs, xy, rd)

    # Initial args
    args,sigs = get_sip_args(wcs)

    print('Args:', args)
    print('Sigs:', sigs)
    print('Number of arguments:', len(args))
    print('Logodds:', obj(args))

    ndim, nwalkers = len(args), 100
    p0 = emcee.utils.sample_ball(args, sigs, size=nwalkers)
    print('p0', p0.shape)

    ps = PlotSequence('mctweak')

    W,H = wcs.get_width(), wcs.get_height()
    mywcs = Sip(wcs)
    
    sampler = emcee.EnsembleSampler(nwalkers, ndim, obj)
    lnp0, rstate = None, None
    pp = []
    for step in range(10000):
        print('Step', step)
        p0, lnp0, rstate = sampler.run_mcmc(p0, 1, lnprob0=lnp0, rstate0=rstate)
        print('Best logprob:', np.max(lnp0))
        i = np.argmax(lnp0)
        print('Best args:', p0[i, :])
github DarkEnergySurvey / ugali / ugali / analysis / mcmc.py
def get_ball(self, params, size=1):
        mle = self.get_mle()
        std = self.get_std()

        p0 = np.array([mle[k] for k in params])
        s0 = np.array([std[k] for k in params])

        return emcee.utils.sample_ball(p0, s0, size)
github sibirrer / lenstronomy / lenstronomy / Extensions / CosmoSampling / cosmo_chain.py
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start):
        """
        returns the mcmc analysis of the parameter space
        """
        sampler = emcee.EnsembleSampler(n_walkers, self.cosmoParam.numParam, self.chain.likelihood)
        p0 = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers)
        new_pos, _, _, _ = sampler.run_mcmc(p0, n_burn)
        sampler.reset()
        store = InMemoryStorageUtil()
        for pos, prob, _, _ in sampler.sample(new_pos, iterations=n_run):
            store.persistSamplingValues(pos, prob, None)
        return store.samples
github dfm / emcee / examples / blobs.py
import emcee
import numpy as np


# This is a dumb-ass log-probability function that also returns a random
# "blob" (this can be any arbitrary---preferably picklable---Python object
# that is associated with this particular position in parameter space).
def lnprob(p):
    return -0.5 * np.sum(p ** 2), np.random.rand()


# Set up the sampler and randomly select a starting position.
nwalkers, ndim = 100, 50
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0 = emcee.utils.sample_ball(np.zeros(ndim), np.random.rand(ndim),
        nwalkers)

# Sample for a few iterations.
niterations = 500
for pos, lp, rstate, blob in sampler.sample(p0, iterations=niterations):
    print(blob)

# The blobs are stored in the `blobs` object. This object is a list (of
# length `niterations`) of lists (of length `nwalkers`).
print(sampler.blobs)
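
Given the list-of-lists layout described in the comment above, an individual blob is indexed first by iteration and then by walker (a small indexing sketch, not part of the original example):

# Blob returned by walker 0 at the first iteration.
first_blob = sampler.blobs[0][0]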
github sibirrer / lenstronomy / lenstronomy / Cosmo / cosmo_sampling.py
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start):
        """
        returns the mcmc analysis of the parameter space
        """
        sampler = emcee.EnsembleSampler(n_walkers, self.cosmoParam.numParam, self.chain.likelihood, args=())
        p0 = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers)
        #p0 = mean_start *np.random.randn(n_walkers, self.cosmoParam.numParam)
        sampler.run_mcmc(p0, n_burn+n_run, progress=True)
        flat_samples = sampler.get_chain(discard=n_burn, thin=1, flat=True)
        return flat_samples
github rodluger / starry / tex / figures / hd189733b.py
# If there are no saved chains in this path
    if not os.path.exists(chain_path):

        # Find ML solution
        # Initialize system
        star, planet, system = instantiate_HD189()
        results = MaxLikeCartography(data.time, data.y, data.yerr, system,
                                     planet, N=N, jac=grad)
        results.compute()

        # Initialize system *without gradients*
        star, planet, system = instantiate_HD189()

        # Initialize MCMC walkers *from maximum likelihood optimized solution*
        p0 = emcee.utils.sample_ball(results.res.x, std_ball, nwalk)

        # Run MCMC
        mcmc = MCMCCartography(data.time, data.y, data.yerr, system,
                               planet, p0=p0,
                               chain_path=chain_path)
        mcmc.run_mcmc(nsteps=nsteps)
        mcmc.save_chain()

    else:

        # Initialize system *without gradients*
        star, planet, system = instantiate_HD189()

        # Read-in saved chain
        mcmc = MCMCCartography(data.time, data.y, data.yerr, system, planet,
                               chain_path=chain_path)
github gammapy / gammapy / gammapy / modeling / sampling.py
sampler : `emcee.EnsembleSampler`
        sampler object containing the trace of all walkers.
    """
    import emcee

    dataset.models.parameters.autoscale()  # Autoscale parameters
    pars = [par.factor for par in dataset.models.parameters.free_parameters]
    ndim = len(pars)

    # Initialize walkers in a ball of relative size 0.5% in all dimensions if the
    # parameters have been fit, or to 10% otherwise
    # TODO: the spread of 0.5% below is valid if a pre-fit of the model has been obtained.
    # currently the run_mcmc() doesn't know the status of previous fit.
    spread = 0.5 / 100
    p0var = np.array([spread * pp for pp in pars])
    p0 = emcee.utils.sample_ball(pars, p0var, nwalkers)

    labels = []
    for par in dataset.models.parameters.free_parameters:
        labels.append(par.name)
        if (par.min is np.nan) and (par.max is np.nan):
            log.warning(
                f"Missing prior for parameter: {par.name}.\nMCMC will likely fail!"
            )

    log.info(f"Free parameters: {labels}")

    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lnprob, args=[dataset], threads=threads
    )

    log.info(f"Starting MCMC sampling: nwalkers={nwalkers}, nrun={nrun}")
github ronpandolfi / Xi-cam / xicam / plugins / cdsaxs / fitting.py
map_MH = map

        if use_mh == 'MH':
            samplers = list(map_MH(get_sampler, zip(range(nwalkers), repeat(N), sigma, repeat(nsteps),
                                                    repeat(residual), repeat(verbose))))
            chain = np.dstack([sampler.chain for sampler in samplers])
            s = chain.shape
            flatchain = np.transpose(chain, axes=[0, 2, 1]).reshape(s[0] * s[2], s[1])
            lnprobability = np.vstack([sampler.lnprobability for sampler in samplers])
            flatlnprobability = lnprobability.transpose().flatten()
            minfitness_each_gen = np.min(-lnprobability * c, axis=0)
        else:
            print('{} parameters'.format(N))
            if use_mh:
                individuals = [np.zeros(N) for _ in range(nwalkers)]
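                # NOTE: MH_proposal_axisaligned and the mh_proposal keyword used
                # below are emcee 2.x features; they were removed in emcee 3.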
                mh_proposal = emcee.utils.MH_proposal_axisaligned(sigma)
                sampler = emcee.EnsembleSampler(
                    nwalkers, N, residual, args=[False, False], threads=parallel)
                for i, _ in enumerate(
                        sampler.sample(individuals, None, None, iterations=nsteps, mh_proposal=mh_proposal)):
                    if verbose:
                        do_verbose(i, sampler)
            else:
                individuals = [[np.random.normal(loc=0, scale=s) for s in sigma] for _ in range(nwalkers)]
                sampler = emcee.EnsembleSampler(
                    nwalkers, N, residual, args=[False, False], threads=parallel)
                for i, _ in enumerate(sampler.sample(individuals, None, None, iterations=nsteps)):
                    if verbose:
                        do_verbose(i, sampler)
            s = sampler.chain.shape
            flatchain = np.transpose(sampler.chain, axes=[1, 0, 2]).reshape(s[0] * s[1], s[2])
            flatlnprobability = sampler.lnprobability.transpose().flatten()
github adrn / thejoker / scripts / continue-with-emcee.py
# transform samples to the parameters we'll sample using emcee
    samples = opars.pack()

    if fixed_jitter: # HACK
        samples = np.delete(samples, 4, axis=1)

    samples_trans = mcmc.pack_mcmc(samples.T, fixed_jitter=fixed_jitter).T

    if fixed_jitter: # HACK
        samples_trans = np.delete(samples_trans, -1, axis=1)

    j_max = np.argmax([mcmc.ln_posterior(s, data, fixed_jitter=fixed_jitter) for s in samples_trans])
    p0 = samples_trans[j_max]

    n_walkers = config.defaults['M_min']
    p0 = emcee.utils.sample_ball(p0, 1E-5*np.abs(p0), size=n_walkers)

    sampler = emcee.EnsembleSampler(n_walkers, p0.shape[1],
                                    lnpostfn=mcmc.ln_posterior, args=(data,fixed_jitter),
                                    pool=pool)

    pos,prob,state = sampler.run_mcmc(p0, n_steps) # MAGIC NUMBER

    pool.close()

    pos = np.hstack((pos, np.zeros((pos.shape[0],1))))
    emcee_samples = mcmc.unpack_mcmc(pos.T)
    with h5py.File(output_filename, 'a') as f:
        g = f.create_group('emcee')

        for i,(name,unit) in enumerate(OrbitalParams._name_to_unit.items()):
            g.create_dataset(name, data=emcee_samples[i])