How to use the pathos.multiprocessing module in pathos

To help you get started, we’ve selected a few pathos examples, based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github nanograv / PINT / pint / scripts / event_optimize.py View on Github external
pos[ii][idx] = svals[ii]
    else:
        pos = [newfitvals + ftr.fiterrs*args.initerrfact*np.random.randn(ndim)
            for i in range(nwalkers)]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee
    # Following are for parallel processing tests...
    # NOTE(review): `if 0:` permanently disables the parallel branch below;
    # it is kept only as a worked example of driving emcee with pathos.
    if 0:
        # Wrapper with `ftr` bound via a default argument at definition time,
        # so worker processes evaluate the posterior without re-passing state.
        def unwrapped_lnpost(theta, ftr=ftr):
            return ftr.lnposterior(theta)

        import pathos.multiprocessing as mp
        # pathos ProcessPool (dill-based pickling) with 8 worker processes,
        # handed to emcee so walker posteriors are evaluated in parallel.
        pool = mp.ProcessPool(nodes=8)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, unwrapped_lnpost,
                                        pool=pool, args=[ftr])
    else:
        # Serial sampler: posterior evaluated in-process.
        sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        """Map each fit-parameter name to its per-walker chain slice.

        Parameters
        ----------
        names : sequence of str
            One name per sampled dimension, in chain-column order.
        sampler : emcee sampler
            Must expose ``.chain`` of shape (nwalkers, nsteps, ndim).

        Returns
        -------
        dict
            name -> transposed 2-D array (nsteps, nwalkers) for that parameter.
        """
        # enumerate pairs names with their chain columns directly, replacing
        # the range(len(names)) index-loop anti-pattern; behavior is unchanged.
        return {name: sampler.chain[:, :, ii].T for ii, name in enumerate(names)}

    def plot_chains(chain_dict, file=False):
        # Plot each parameter's walker chains, one stacked subplot per
        # parameter, all sharing the x (step) axis.
        # NOTE(review): the snippet is truncated here — `file` is unused in the
        # visible lines (presumably it selects save-to-file vs. interactive
        # display further down); confirm against the full source.
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
github morganics / bayesianpy / bayesianpy / output.py View on Github external
def _calc_num_threads(self, df_size: int, query_size: int, max_threads=None) -> int:
        # Heuristically size a worker pool from the total workload
        # (df_size * query_size), capped at cpu_count() - 1 so one core
        # stays free for the parent process.
        num_queries = df_size * query_size

        # Single-core machine: never more than one worker.
        if mp.cpu_count() == 1:
            max = 1  # NOTE(review): shadows the builtin `max`; consider renaming
        else:
            max = mp.cpu_count() - 1

        # Roughly one thread per 5000 queries.
        calc = int(num_queries / 5000)
        if calc > max:
            r = max
        elif calc <= 1:
            # Small workloads: 2 threads above 1000 queries, else 1.
            if num_queries > 1000:
                r = 2
            else:
                r = 1
        else:
            r = calc

        # NOTE(review): snippet truncated — the body of this clause (presumably
        # `r = max_threads`) and the final `return r` are not visible here.
        if max_threads is not None and r > max_threads:
github andrewowens / multisensory / src / aolib / util.py View on Github external
def parmap_dill(f, xs, nproc = None):
   """Parallel map over *xs* using a pathos (dill-pickling) process pool.

   Unlike the stdlib multiprocessing, pathos uses dill, so *f* may be a
   lambda or closure that ordinary pickle cannot serialize.

   Parameters
   ----------
   f : callable
       Function applied to each element of *xs*.
   xs : iterable
       Inputs to map over.
   nproc : int, optional
       Number of worker processes; defaults to 12.

   Returns
   -------
   list
       ``[f(x) for x in xs]`` computed in parallel.
   """
   import pathos.multiprocessing
   if nproc is None:
     nproc = 12
   pool = pathos.multiprocessing.Pool(processes = nproc)
   try:
     # map_async().get(<huge timeout>) instead of a bare map(): the timed
     # get keeps the call interruptible by KeyboardInterrupt.
     ret = pool.map_async(f, xs).get(10000000)
   finally:
     pool.close()
     # Fix: join the pool after close(); without it the worker processes
     # are abandoned rather than reaped, leaking processes on every call.
     pool.join()
   return ret
github morganics / bayesianpy / bayesianpy / output.py View on Github external
def _calc_num_threads(self, df_size: int, query_size: int, max_threads=None) -> int:
        # Heuristically size a worker pool from the total workload
        # (df_size * query_size), capped at cpu_count() - 1 so one core
        # stays free for the parent process.
        num_queries = df_size * query_size

        # Single-core machine: never more than one worker.
        if mp.cpu_count() == 1:
            max = 1  # NOTE(review): shadows the builtin `max`; consider renaming
        else:
            max = mp.cpu_count() - 1

        # Roughly one thread per 5000 queries.
        calc = int(num_queries / 5000)
        if calc > max:
            r = max
        elif calc <= 1:
            # Small workloads: 2 threads above 1000 queries, else 1.
            if num_queries > 1000:
                r = 2
            else:
                r = 1
        else:
            r = calc

        # NOTE(review): snippet truncated — the body of this clause (presumably
        # `r = max_threads`) and the final `return r` are not visible here.
        if max_threads is not None and r > max_threads:
github PPPLDeepLearning / plasma-python / plasma / models / runner.py View on Github external
from plasma.models.builder import ModelBuilder
    # NOTE(review): scrape artifact — this import line lost its original
    # indentation; the lines below are the body of a prediction routine whose
    # `def` line is not visible in this snippet.
    specific_builder = ModelBuilder(conf)

    # Accumulators: predictions, ground truth, and per-shot disruption flags.
    y_prime = []
    y_gold = []
    disruptive = []

    # Build and compile the model, then load the latest saved weights.
    model = specific_builder.build_model(True)
    model.compile(optimizer=optimizer_class(),
                  loss=conf['data']['target'].loss)

    specific_builder.load_model_weights(model)
    model_save_path = specific_builder.get_latest_save_path()

    start_time = time.time()
    # Fan the per-shot predictions out over a process pool; each worker calls
    # make_single_prediction with the builder/loader/weights pre-bound.
    pool = mp.Pool(use_cores)
    fn = partial(make_single_prediction, builder=specific_builder,
                 loader=loader, model_save_path=model_save_path)

    # NOTE(review): `pool._processes` is a private Pool attribute; printing
    # `use_cores` directly would avoid relying on internals.
    print('running in parallel on {} processes'.format(pool._processes))
    # imap yields results in input order, so index i lines up with shot_list.
    for (i, (y_p, y, is_disruptive)) in enumerate(pool.imap(fn, shot_list)):
        print('Shot {}/{}'.format(i, len(shot_list)))
        sys.stdout.flush()
        y_prime.append(y_p)
        y_gold.append(y)
        disruptive.append(is_disruptive)
    pool.close()
    pool.join()
    print('Finished Predictions in {} seconds'.format(time.time()-start_time))
    loader.set_inference_mode(False)
    return y_prime, y_gold, disruptive