How to use the pathos.helpers.mp module in pathos

To help you get started, we’ve selected a few pathos examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github MIC-DKFZ / trixi / trixi / logger / visdom / numpyvisdomlogger.py View on Github external
import atexit
from collections import defaultdict
import inspect

import os

# Platform switch: on Windows ("nt") use thread-backed stand-ins, elsewhere
# use pathos' wrapped multiprocessing primitives — NOTE(review): presumably
# to sidestep Windows' spawn-based multiprocessing limitations; confirm.
if os.name == "nt":
    IS_WINDOWS = True
    # Threads masquerade as processes so the rest of the module is
    # platform-agnostic: `Queue` and `Process` exist under both branches.
    from queue import Queue
    from threading import Thread as Process
else:
    IS_WINDOWS = False
    from pathos.helpers import mp
    # Alias the multiprocessing primitives under the same names used above.
    Queue = mp.Queue
    Process = mp.Process
import sys
import traceback

import numpy as np

from trixi.logger.abstractlogger import AbstractLogger, convert_params
from trixi.util import ExtraVisdom


def add_to_queue(func):
    """Decorator: instead of calling *func*, enqueue ``(func, args, kwargs)``
    on ``self._queue`` for deferred execution by a consumer.

    The wrapper returns ``None``; the decorated callable's own return value
    is never produced at call time.
    """
    from functools import wraps

    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def wrapper(self, *args, **kwargs):
        tpl = (func, args, kwargs)
        # put_nowait: never block the calling thread on a full queue
        self._queue.put_nowait(tpl)

    return wrapper
github MIC-DKFZ / trixi / trixi / logger / visdom / numpyvisdomlogger.py View on Github external
from __future__ import division, print_function

import atexit
from collections import defaultdict
import inspect

import os

# Platform switch: on Windows ("nt") use thread-backed stand-ins, elsewhere
# use pathos' wrapped multiprocessing primitives — NOTE(review): presumably
# to sidestep Windows' spawn-based multiprocessing limitations; confirm.
if os.name == "nt":
    IS_WINDOWS = True
    # Threads masquerade as processes so the rest of the module is
    # platform-agnostic: `Queue` and `Process` exist under both branches.
    from queue import Queue
    from threading import Thread as Process
else:
    IS_WINDOWS = False
    from pathos.helpers import mp
    # Alias the multiprocessing primitives under the same names used above.
    Queue = mp.Queue
    Process = mp.Process
import sys
import traceback

import numpy as np

from trixi.logger.abstractlogger import AbstractLogger, convert_params
from trixi.util import ExtraVisdom


def add_to_queue(func):
    """Decorator: instead of calling *func*, enqueue ``(func, args, kwargs)``
    on ``self._queue`` for deferred execution by a consumer.

    The wrapper returns ``None``; the decorated callable's own return value
    is never produced at call time.
    """
    from functools import wraps

    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def wrapper(self, *args, **kwargs):
        tpl = (func, args, kwargs)
        # put_nowait: never block the calling thread on a full queue
        self._queue.put_nowait(tpl)

    return wrapper
github uqfoundation / pathos / pathos / profile.py View on Github external
def process_id():
    """Return the OS process id (pid) of the currently running process."""
    from pathos.helpers import mp
    current = mp.current_process()
    return current.pid
github alvinxhan / PhyCLIP / phyclip_modules / global_treeinfo.py View on Github external
nindex_to_node[n] = node
            node_to_nindex[node] = n

            # get parent node (except for root)
            try:
                node_to_parent_node[n] = node_to_nindex[node.up]
            except:
                pass

            # node annotation for final tree output
            node_string = re.sub('[^\)]+$', '', node.write(format=5))
            tree_string = tree_string.replace(node_string, '{}[&NODE_ID={}]'.format(node_string, n))

        # multi-proc setup
        manager = mp.Manager()
        # shared memory
        leaf_dist_to_node_queue = manager.Queue()
        node_to_leaves_queue = manager.Queue()
        # generate processes
        processes = []

        nindex_list = nindex_to_node.keys()[:]
        shuffle(nindex_list) # shuffle to make multi-processes more equitable
        increment = int(len(nindex_list)/self.cores)

        for p in range(self.cores):
            if p == self.cores-1:
                curr_nindex_list = nindex_list[p*increment:]
            else:
                curr_nindex_list = nindex_list[p*increment:(p*increment)+increment]
github stanford-futuredata / optimus-maximus / benchmark / consts.py View on Github external
def get_numa_queue(num_jobs_per_numa_node=1):
    """Build a manager-backed queue pre-loaded with one CPU-assignment
    entry per job slot on each NUMA node.

    The queue's capacity is NUM_NUMA_NODES * num_jobs_per_numa_node, so it
    starts full; workers draw an assignment and return it when done.
    """
    manager = mp.Manager()
    numa_queue = manager.Queue(NUM_NUMA_NODES * num_jobs_per_numa_node)
    for offsets in NUMA_CPU_ID_OFFSETS:
        for _ in range(num_jobs_per_numa_node):
            assignment = get_cpu_assignments(offsets, NUM_VIRTUAL_CORES_PER_POOL)
            numa_queue.put(assignment)
    return numa_queue
github Kaiserreich / HOI4-Validator / Scripts / parsePDS.py View on Github external
pds_list_members << (pds_members | ZeroOrMore(pds_value))

    pds_member.ignore(pythonStyleComment)
    pds_members.ignore(pythonStyleComment)
    #pds_members.setDebug()

    def parse_PDS_script(self, string):
        """Run *string* through the PDS grammar (``self.pds_members``);
        empty or whitespace-only input is handed back untouched."""
        if not string.strip():
            return string
        parsed = self.pds_members.parseString(string, True)
        return parsed

# Module-level singletons shared by the parsing helpers in this file.
PARSER = PDSParser()
# Cross-process lock (pathos multiprocessing) guarding pre-parse work.
PREPARSE_LOCK = pathos.helpers.mp.Lock()
def preparse_PDS_script(string, start, end, lock=PREPARSE_LOCK, use_category=True):
    blocks = defaultdict(list) if use_category else []
    open_braces = 0
    start_idx_set = False
    start_idx = 0
    end_idx_set = False
    end_idx = 0
    is_comment = False
    is_quoted = False #Stop-gap
    if use_category:
        category = ""
        current_category = ""
    string = re.sub(r"#.*\n", "", string).strip()
    string = re.sub(r"\blog\b\s*=\s*\".*?\"", "", string)
    for idx, c in enumerate(string):
        if c == '#':
github fjxmlzn / GPUTaskScheduler / gpu_task_scheduler / gpu_task_scheduler.py View on Github external
def start(self):
        """Spawn one worker process per configured GPU environment and
        block until every worker has finished."""
        gpu_envs = self._config_manager.get_gpu_envs()
        processes = []
        for gpu_env in gpu_envs:
            try:
                # Some pathos layouts expose Process at mp.process.Process...
                processes.append(
                    pathos.helpers.mp.process.Process(
                        target=gpu_worker, args=(
                            gpu_env["name"], gpu_env["env"],
                            self._lock, self._config, self._gpu_task_class,
                            self._config_manager, logging.getLogger(""))))
            except AttributeError:
                # ...others at mp.Process directly; fall back on AttributeError.
                processes.append(
                    pathos.helpers.mp.Process(
                        target=gpu_worker, args=(
                            gpu_env["name"], gpu_env["env"],
                            self._lock, self._config, self._gpu_task_class,
                            self._config_manager, logging.getLogger(""))))
        # Start all workers first, then join, so they run concurrently.
        for process in processes:
            process.start()
        for process in processes:
            process.join()
github alvinxhan / PhyCLIP / phyclip_modules / global_treeinfo.py View on Github external
node_to_leaves_queue = manager.Queue()
        # generate processes
        processes = []

        nindex_list = nindex_to_node.keys()[:]
        shuffle(nindex_list) # shuffle to make multi-processes more equitable
        increment = int(len(nindex_list)/self.cores)

        for p in range(self.cores):
            if p == self.cores-1:
                curr_nindex_list = nindex_list[p*increment:]
            else:
                curr_nindex_list = nindex_list[p*increment:(p*increment)+increment]

            #for n, node in nindex_to_node.items():
            proc = mp.Process(target=self.get_leaf_distance_to_node, args=(curr_nindex_list, [nindex_to_node[n] for n in curr_nindex_list], leaf_dist_to_node_queue, node_to_leaves_queue))
            processes.append(proc)
            proc.start()

        # collect results to dictionary
        for p in range(len(processes)):
            node_to_leaves.update(node_to_leaves_queue.get())

            for leaf_key, list_value in leaf_dist_to_node_queue.get().items():
                for (n, distance) in list_value:
                    try:
                        self.leaf_dist_to_node[leaf_key][n] = distance
                    except:
                        self.leaf_dist_to_node[leaf_key] = {n:distance}

        # wait for all processes to end
        for proc in processes:
github fjxmlzn / GPUTaskScheduler / gpu_task_scheduler / gpu_task_scheduler.py View on Github external
def start(self):
        """Spawn one worker process per configured GPU environment and
        block until every worker has finished."""
        gpu_envs = self._config_manager.get_gpu_envs()
        processes = []
        for gpu_env in gpu_envs:
            try:
                # Some pathos layouts expose Process at mp.process.Process...
                processes.append(
                    pathos.helpers.mp.process.Process(
                        target=gpu_worker, args=(
                            gpu_env["name"], gpu_env["env"],
                            self._lock, self._config, self._gpu_task_class,
                            self._config_manager, logging.getLogger(""))))
            except AttributeError:
                # ...others at mp.Process directly; fall back on AttributeError.
                processes.append(
                    pathos.helpers.mp.Process(
                        target=gpu_worker, args=(
                            gpu_env["name"], gpu_env["env"],
                            self._lock, self._config, self._gpu_task_class,
                            self._config_manager, logging.getLogger(""))))
        # Start all workers first, then join, so they run concurrently.
        for process in processes:
            process.start()
        for process in processes:
            process.join()