How to use the pathos.pools module in pathos

To help you get started, we’ve selected a few pathos.pools examples based on popular ways the module is used in public projects.

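All of the examples below revolve around the same small API, so before diving in, here is a minimal, self-contained sketch of the typical ProcessPool life cycle (the function and data are placeholders):

import pathos.pools as pp

def square(x):
    return x * x

pool = pp.ProcessPool(4)               # pool backed by 4 worker processes
results = pool.map(square, range(10))  # blocking, ordered map
print(results)                         # [0, 1, 4, ..., 81]

pool.close()  # stop accepting new work
pool.join()   # wait for the workers to finish
pool.clear()  # drop the cached pool instance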

github uqfoundation / pathos / examples / test_profile.py View on Github external
    tuple(_uimap(disable_profiling, range(10))) # in the workers
    for i in _uimap(work, range(-20,-10)):
        print(i)
    """

    # activate profiling, but remove profiling from the worker
    # (these profiling helpers come from pathos.profile; `work` is defined
    # earlier in the file)
    from pathos.profile import enable_profiling, not_profiled, profile
    enable_profiling()
    for i in map(not_profiled(work), range(-30,-20)):
        print(i)

    # print stats for a profile of 'import ppft' in another process
    def import_ppft(*args):
        import ppft

    import pathos.pools as pp
    pool = pp.ProcessPool(1)
    profile('cumulative', pipe=pool.pipe)(import_ppft)
    pool.close()
    pool.join()
    pool.clear()
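The pipe method used above evaluates a single call in a worker process and blocks until it returns, which is why it can carry the profiled import. A minimal sketch of pipe on its own (the function is a placeholder):

import pathos.pools as pp

def greet(name):
    return 'hello, %s' % name

pool = pp.ProcessPool(1)
print(pool.pipe(greet, 'world'))  # one call, one worker, blocking result
pool.close()
pool.join()
pool.clear()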
github lukasturcani / stk / src / stk / molecular / topology_graphs / topology_graph.py View on Github external
def _place_building_blocks_parallel(self, mol, vertices, edges):
        bb_id = 0

        vertex_building_blocks = {
            vertex: bb
            for bb, vertices in mol.building_block_vertices.items()
            for vertex in vertices
        }
        bb_map = {
            bb.get_identity_key(): bb
            for bb in mol.get_building_blocks()
        }
        # Use a shorter alias.
        counter = mol.building_block_counter
        with pathos.pools.ProcessPool(self._num_processes) as pool:
            for stage in self._stages:
                verts = []
                bbs = []
                for instance_vertex in stage:
                    verts.append(vertices[instance_vertex.id])
                    bbs.append(vertex_building_blocks[instance_vertex])
                results = pool.map(_place_building_blocks, verts, bbs)

                for result in results:
                    result_bb = result.building_block
                    bb = bb_map[result_bb.get_identity_key()]

                    mol._position_matrix.extend(
                        result_bb.get_position_matrix()
                    )
                    atom_map = self._assign_func_groups_to_edges(
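Note the pool.map(_place_building_blocks, verts, bbs) call: like the builtin map, pathos pools accept multiple iterables, pairing the i-th elements of each and passing them as separate arguments. A minimal sketch with placeholder names:

import pathos.pools as pp

def place(vertex, building_block):
    return (vertex, building_block)

with pp.ProcessPool(2) as pool:
    results = pool.map(place, [0, 1, 2], ['a', 'b', 'c'])
print(results)  # [(0, 'a'), (1, 'b'), (2, 'c')]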
github jcoreyes / OP3 / op3 / envs / blocks / mujoco / utils / data_generation_utils.py View on Github external
import pickle

import h5py
import pathos.pools as pp

def createMultipleSims(args, obs_size, ac_size, createSingleSim, num_workers=1):
    datasets = {'training': args.num_sims, 'validation': min(args.num_sims, 100)}
    n_frames = args.num_frames
    all_results = []
    for folder in datasets:
        num_sims = datasets[folder]
        if num_workers == 1:
            results = [createSingleSim(args) for _ in range(num_sims)]
        else:
            pool = pp.ProcessPool(num_workers)  # left open here; see the cleanup sketch after this snippet
            results = pool.map(createSingleSim, [args for _ in range(num_sims)])
        all_results.append(results)

    # Save env data to pickle
    if 'env' in all_results[0][0].keys():
        env_data = {}
        for f_idx, folder in enumerate(datasets):
            env_data[folder] = [a_result['env'] for a_result in all_results[f_idx]]
        with open(args.filename+'.pkl', 'wb') as f:
            pickle.dump(env_data, f)


    # Save data to hdf5 file
    with h5py.File(args.filename+'.h5', 'w') as f:
        for f_idx, folder in enumerate(datasets):
            num_sims = datasets[folder]
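The parallel branch above leaves its pool open. Because pathos caches pool instances between calls, the usual pattern (visible in several other snippets on this page) is to shut the pool down explicitly once the results are in; a minimal sketch:

import pathos.pools as pp

pool = pp.ProcessPool(4)
try:
    results = pool.map(len, [[1], [1, 2], [1, 2, 3]])
finally:
    pool.close()  # no more tasks
    pool.join()   # wait for the workers
    pool.clear()  # evict the cached pool so the next call builds a fresh one

print(results)  # [1, 2, 3]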
github seung-lab / kimimaro / kimimaro / intake.py View on Github external
import signal
from functools import partial

import pathos.pools
# `shm` (shared-memory helpers) and `parallel_skeletonize_subset` come from
# elsewhere in the project

def skeletonize_parallel(
    all_dbf_shm, dbf_shm_location, 
    cc_labels_shm, cc_shm_location, remapping, 
    teasar_params, anisotropy, all_slices, 
    border_targets, extra_targets_before, extra_targets_after,
    progress, fix_borders, fix_branching, 
    cc_segids, parallel, chunk_size
  ):
    prevsigint = signal.getsignal(signal.SIGINT)
    prevsigterm = signal.getsignal(signal.SIGTERM)
    
    executor = pathos.pools.ProcessPool(parallel)

    def cleanup(signum, frame):
      shm.unlink(dbf_shm_location)
      shm.unlink(cc_shm_location)
      executor.terminate()

    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGTERM, cleanup)   

    skeletonizefn = partial(parallel_skeletonize_subset, 
      dbf_shm_location, all_dbf_shm.shape, all_dbf_shm.dtype, 
      cc_shm_location, cc_labels_shm.shape, cc_labels_shm.dtype,
      remapping, teasar_params, anisotropy, all_slices, 
      border_targets, extra_targets_before, extra_targets_after, 
      False, # progress, use our own progress bar below
      fix_borders, fix_branching
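The handlers above tie SIGINT and SIGTERM to shared-memory cleanup and pool termination, so the workers die even if the run is interrupted. A stripped-down sketch of just the signal wiring:

import signal
import pathos.pools

executor = pathos.pools.ProcessPool(2)

def cleanup(signum, frame):
    executor.terminate()  # kill the workers immediately

signal.signal(signal.SIGINT, cleanup)   # Ctrl-C
signal.signal(signal.SIGTERM, cleanup)  # kill <pid>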
github seung-lab / igneous / igneous / scripts / merge_sharded_skeletons.py View on Github external
dust_threshold=1000, # voxels 
      tick_threshold=1300, # nm
    )

  merged_skeletons = crt_dict()
  labels = list(skeletons.keys())

  with tqdm(total=len(skeletons), disable=False, desc="Final Merging") as pbar:
    if parallel == 1:
      for label in labels:
        skel = complex_merge(skeletons[label])
        merged_skeletons[skel.id] = skel.to_precomputed()
        del skeletons[label]
        pbar.update(1)
    else:
      pool = pathos.pools.ProcessPool(parallel)
      for skel in pool.uimap(complex_merge, skeletons.values()):
        merged_skeletons[skel.id] = skel.to_precomputed()
        pbar.update(1)
      pool.close()
      pool.join()
      pool.clear()

  return merged_skeletons
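Unlike map, uimap returns an iterator that yields results in completion order rather than input order, which is what lets the progress bar above tick as soon as any task finishes. A minimal sketch:

import time
import pathos.pools as pp

def slow_identity(x):
    time.sleep(0.1 * x)
    return x

pool = pp.ProcessPool(4)
for result in pool.uimap(slow_identity, [3, 1, 2, 0]):
    print(result)  # most likely 0, 1, 2, 3: the quickest tasks arrive first
pool.close()
pool.join()
pool.clear()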
github pnnl / socialsim / december-measurements / UserCentricMeasurements.py View on Github external
def getAvgTimebwEventsUsers(self, selectedUsers=True, nCPU=1):
        df = self.determineDf(selectedUsers)
        users = df['user'].unique()
        args = [(df, user) for user in users]
        pool = pp.ProcessPool(nCPU)
        deltas = pool.map(self.getMeanTimeHelper, args)
        return deltas
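Since pool.map hands each worker exactly one element per call, packing several arguments into a tuple and unpacking them inside the helper (as getMeanTimeHelper presumably does here) is a common workaround; a minimal sketch with placeholder data:

import pathos.pools as pp

def mean_time_helper(pair):
    df, user = pair          # unpack the (dataframe, user) tuple
    return (user, len(df))   # placeholder computation

args = [(['row1', 'row2'], 'alice'), (['row1'], 'bob')]
pool = pp.ProcessPool(2)
print(pool.map(mean_time_helper, args))  # [('alice', 2), ('bob', 1)]
pool.close()
pool.join()
pool.clear()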
github lukasturcani / stk / src / stk / populations.py View on Github external
        Raises
        ------
        :class:`RuntimeError`
            If a process pool is already open.

        """

        if self._process_pool is not None:
            raise RuntimeError('A process pool is already open.')

        if num_processes is None:
            num_processes = psutil.cpu_count()

        if num_processes != 1:
            self._process_pool = pathos.pools.ProcessPool(
                nodes=num_processes
            )
        return self
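ProcessPool takes the worker count either positionally or via the nodes keyword, as in the snippet above; both forms below request the same thing:

import pathos.pools

# equivalent ways to request four worker processes
pool = pathos.pools.ProcessPool(4)
pool = pathos.pools.ProcessPool(nodes=4)  # keyword form, as used above

print(pool.map(abs, [-1, -2, 3]))  # [1, 2, 3]
pool.close()
pool.join()
pool.clear()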
github seung-lab / python-task-queue / taskqueue / taskqueue.py View on Github external
  # This is a hack to get dill to pickle dynamically
  # generated classes. This is an important use case
  # for when we create iterators with generator __iter__
  # functions on demand.

  # https://github.com/uqfoundation/dill/issues/56

  try:
    task = next(item for item in tasks if item is not None)
  except StopIteration:
    return 

  cls_module = task.__class__.__module__
  task.__class__.__module__ = '__main__'

  with pathos.pools.ProcessPool(parallel) as pool:
    pool.map(uploadfn, tasks)

  task.__class__.__module__ = cls_module

  if not error_queue.empty():
    errors = []
    while not error_queue.empty():
      err = error_queue.get()
      if err is not StopIteration:
        errors.append(err)
    if len(errors):
      raise Exception(errors)
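The workaround relies on dill serializing anything that lives in __main__ by value, so rebinding __module__ ships the class definition itself to the workers instead of asking them to import it by name. A toy sketch of the same idea (the factory and class are placeholders):

import pathos.pools as pp

def make_task_class():
    # a dynamically generated class that no worker could import by name
    class Task(object):
        def run(self):
            return 'ran'
    return Task

Task = make_task_class()
Task.__module__ = '__main__'  # ask dill to pickle the class by value

with pp.ProcessPool(2) as pool:
    print(pool.map(lambda task: task.run(), [Task(), Task()]))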
github lukasturcani / stk / src / stk / populations.py View on Github external
            # Every combination of cage and encapsulant.
            complexes = stk.Population.init_all(
                building_blocks=[cages, encapsulants],
                topology_graphs=[stk.host_guest_complex.Complex()]
            )

        """

        bbs, topologies = [], []
        mols = it.product(*building_blocks, topology_graphs)
        for *mol_bbs, topology in mols:
            bbs.append(mol_bbs)
            topologies.append(topology)

        with pathos.pools.ProcessPool(num_processes) as pool:
            mols = pool.map(ConstructedMolecule, bbs, topologies)

        # Update the cache.
        if use_cache:
            for i, mol in enumerate(mols):
                # If the molecule did not exist already, add it to the
                # cache.
                if (
                    not ConstructedMolecule.has_cached_mol(
                        identity_key=mol.get_identity_key()
                    )
                ):
                    mol.update_cache()
                # If the molecule did exist already, use the cached
                # version.
                else:
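The for *mol_bbs, topology in mols line above uses extended unpacking: everything but the last element of each product tuple lands in the list mol_bbs, and the last element lands in topology. For example:

import itertools as it

building_blocks = [['bb1', 'bb2'], ['bb3']]
topology_graphs = ['topoA', 'topoB']

for *mol_bbs, topology in it.product(*building_blocks, topology_graphs):
    print(mol_bbs, topology)
# ['bb1', 'bb3'] topoA
# ['bb1', 'bb3'] topoB
# ['bb2', 'bb3'] topoA
# ['bb2', 'bb3'] topoB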
github JaredFern / VecShare / vecshare / vecshare.py View on Github external
		download(bool,opt): Flag for saving query results
	Returns:
		Pandas Dataframe, each row specifying a word vector.
	"""
	# Set names should have '-' and file names should use '_'
	def partial_query(args):
	    return dw.query(args[0], args[1]).dataframe

	if case_sensitive: title = 'text'
	else:
		words = [word.lower() for word in words]
		title = 'lower(text)'
	set_name, vs_format = _error_check(emb_name, set_name=set_name, vs_format=vs_format)
	query_list, proc_cnt = [], 16
	ind_results, combined_vecs = pd.DataFrame(), pd.DataFrame()
	multiproc = pp.ProcessPool(proc_cnt)

	if vs_format == 'large':
		try:
			ind_query = 'SELECT * FROM ' + emb_name + ' where ' + title
			if len(words)==1:
				cond = '"' + words[0] + '"'
				ind_results = dw.query(set_name, ind_query + cond).dataframe
			else:
				for i in range(0,len(words), 400):
					query_words=words[i:i+400]
					query_list.append([set_name, ind_query + ' in'+ str(tuple(query_words)) ])
				word_index = multiproc.map(partial_query, query_list)
				word_index = [word for word in word_index]
				for each in word_index:
					ind_results = ind_results.append(each)
		except:
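The chunking pattern above (slice the word list into 400-word batches, then map the batches across workers) keeps each SQL IN clause to a manageable size while parallelizing the round trips. A stripped-down sketch with a placeholder in place of dw.query:

import pathos.pools as pp

def run_query(chunk):
    # stand-in for dw.query(set_name, sql); pretend it returns a row count
    return len(chunk)

words = ['w%d' % i for i in range(1000)]
chunks = [words[i:i + 400] for i in range(0, len(words), 400)]

pool = pp.ProcessPool(16)
print(pool.map(run_query, chunks))  # [400, 400, 200]
pool.close()
pool.join()
pool.clear()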