How to use the multiprocessing.JoinableQueue class in multiprocessing

To help you get started, we’ve selected a few multiprocessing.JoinableQueue examples, based on popular ways it is used in public projects.
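Every example below relies on the same contract: each consumer calls task_done() once for every item it takes off the queue, and the producer calls join() to block until every item has been acknowledged. Before the real-world excerpts, here is a minimal, self-contained sketch of that contract; the worker function and the worker count are illustrative and not taken from any of the projects on this page.

import multiprocessing

def worker(tasks):
    while True:
        item = tasks.get()
        if item is None:           # sentinel: this worker is done
            tasks.task_done()
            break
        print('processing', item)  # stand-in for real work
        tasks.task_done()          # acknowledge the item so join() can return

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    procs = [multiprocessing.Process(target=worker, args=(tasks,)) for _ in range(2)]
    for p in procs:
        p.start()
    for item in range(10):
        tasks.put(item)
    for _ in procs:
        tasks.put(None)            # one sentinel per worker
    tasks.join()                   # blocks until task_done() has been called for every put()
    for p in procs:
        p.join()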


github bslatkin / ringbuffer / test_ringbuffer.py
def new_queue(self):
        return multiprocessing.JoinableQueue()
github migvel / color_trace / color_trace_multi.py
        dither: dithering algorithm to use. (Remember, final output is affected by despeckle.)
        None: the default, performs no dithering
        'floydsteinberg': available with 'mc', 'as', and 'nq'
        'riemersma': only available with 'as'
    palette: source of custom palette image for color reduction (overrides
        colors and quantization)
    stack: whether to stack color traces (recommended for more accurate output)
    despeckle: suppress speckles of this many pixels
    smoothcorners: corner smoothing: 0 for no smoothing, 1.334 for max
    optimizepaths: Bezier curve optimization: 0 for least, 5 for most
"""
    tmp = tempfile.mkdtemp()

    # create two job queues
    # q1 = scaling + color reduction
    q1 = multiprocessing.JoinableQueue()
    # q2 = isolation + tracing
    q2 = multiprocessing.JoinableQueue()

    # create a manager to share the layers between processes
    manager = multiprocessing.Manager()
    layers = []
    for i in range(min(len(inputs), len(outputs))):
        layers.append(manager.list())
    # and make a lock for reading and modifying layers
    layers_lock = multiprocessing.Lock()

    # create a shared memory counter of completed and total tasks for measuring progress
    progress = multiprocessing.Value('i', 0)
    if colors is not None:
        # this is only an estimate because quantization can result in fewer colors
        # than in the "colors" variable. This value is corrected by q1 tasks to converge
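color_trace_multi chains two JoinableQueues so that the output of the first stage becomes the input of the second, and shares results through a Manager. Below is a stripped-down sketch of that two-stage layout; the stage bodies are placeholders, not the real scaling/quantization and isolation/tracing code.

import multiprocessing

def scale_stage(q1, q2):
    while True:
        item = q1.get()
        if item is None:
            q1.task_done()
            break
        q2.put(item * 2)            # placeholder for scaling + color reduction
        q1.task_done()

def trace_stage(q2, layers):
    while True:
        item = q2.get()
        if item is None:
            q2.task_done()
            break
        layers.append(item + 1)     # placeholder for isolation + tracing
        q2.task_done()

if __name__ == '__main__':
    q1 = multiprocessing.JoinableQueue()
    q2 = multiprocessing.JoinableQueue()
    manager = multiprocessing.Manager()
    layers = manager.list()         # shared result container, like `layers` above

    p1 = multiprocessing.Process(target=scale_stage, args=(q1, q2))
    p2 = multiprocessing.Process(target=trace_stage, args=(q2, layers))
    p1.start()
    p2.start()

    for i in range(5):
        q1.put(i)
    q1.put(None)
    q1.join()                       # stage 1 has acknowledged all of its input
    q2.put(None)
    q2.join()                       # stage 2 has acknowledged everything stage 1 produced
    p1.join()
    p2.join()
    print(list(layers))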
github pyga / awpa / awpa / refactor.py
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
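The shutdown sequence in the finally block is the part worth copying: join() the queue so every submitted task has been acknowledged, then push one None sentinel per worker so each child loop exits. The excerpt does not show the child side, so here is a hedged sketch of a worker loop that pairs with that sequence; do_work and the (args, kwargs) task shape are illustrative.

import multiprocessing

def do_work(path, write=False):
    print('refactoring', path, 'write =', write)   # placeholder for the real per-item work

def child(queue):
    # Pairs with the shutdown sequence above: the parent calls queue.join()
    # before sending the None sentinels, so the sentinel itself is never
    # acknowledged; real tasks get task_done() even if they raise.
    task = queue.get()
    while task is not None:
        args, kwargs = task
        try:
            do_work(*args, **kwargs)
        finally:
            queue.task_done()
        task = queue.get()

if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()
    workers = [multiprocessing.Process(target=child, args=(queue,)) for _ in range(2)]
    for p in workers:
        p.start()
    for path in ['a.py', 'b.py', 'c.py']:
        queue.put(((path,), {'write': True}))
    queue.join()                    # wait for the real tasks first...
    for _ in workers:
        queue.put(None)             # ...then tell each worker to exit
    for p in workers:
        p.join()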
github parltrack / parltrack / utils / multiplexer.py
def __init__(self, worker, threads=4):
        self.worker = worker
        self.q = JoinableQueue()
        self.done = Value(c_bool, False)
        self.consumer = Process(target=self.consume)
        self.pool = Pool(threads)
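The consume method is not included in this excerpt; with a JoinableQueue plus a shared c_bool, the consumer typically loops until the flag is set and the queue has drained. Here is a rough, self-contained sketch of that shape; the names and the one-second timeout are assumptions, not parltrack's actual code.

from ctypes import c_bool
from multiprocessing import JoinableQueue, Process, Value
from queue import Empty

def consume(q, done):
    # Drain the queue until the producer sets `done` and nothing is left.
    while not (done.value and q.empty()):
        try:
            item = q.get(timeout=1)
        except Empty:
            continue
        try:
            print('handling', item)   # stand-in for calling the real worker
        finally:
            q.task_done()

if __name__ == '__main__':
    q = JoinableQueue()
    done = Value(c_bool, False)
    consumer = Process(target=consume, args=(q, done))
    consumer.start()
    for i in range(5):
        q.put(i)
    q.join()                # every item has been acknowledged
    done.value = True       # let the consumer's loop condition become false
    consumer.join()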
github airbnb / binaryalert / cli / manager.py
def _enqueue(
            queue_name: str, messages: Iterable[Dict[str, Any]],
            summary_func: Callable[[Dict[str, Any]], Tuple[int, str]]) -> None:
        """Use multiple worker processes to enqueue messages onto an SQS queue in batches.

        Args:
            queue_name: Name of the target SQS queue
            messages: Iterable of dictionaries, each representing a single SQS message body
            summary_func: Function from message to (item_count, summary) to show progress
        """
        num_workers = multiprocessing.cpu_count() * 4
        tasks: JoinableQueue = JoinableQueue(num_workers * 10)  # Max tasks waiting in queue

        # Create and start worker processes
        workers = [Worker(queue_name, tasks) for _ in range(num_workers)]
        for worker in workers:
            worker.start()

        # Create an EnqueueTask for each batch of 10 messages (max allowed by SQS)
        message_batch = []
        progress = 0  # Total number of relevant "items" processed so far
        for message_body in messages:
            count, summary = summary_func(message_body)
            progress += count
            print('\r{}: {:<90}'.format(progress, summary), end='', flush=True)

            message_batch.append(json.dumps(message_body, separators=(',', ':')))
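Worker here is a multiprocessing.Process subclass defined elsewhere in the repository, and the bounded JoinableQueue(num_workers * 10) applies backpressure so the producer cannot run arbitrarily far ahead of the consumers. The sketch below shows what such a Process-subclass worker typically looks like; it illustrates the pattern and is not binaryalert's actual Worker class.

from multiprocessing import JoinableQueue, Process

class Worker(Process):
    """Pull message batches off a JoinableQueue until a None sentinel arrives."""

    def __init__(self, label, tasks):
        super().__init__()
        self.label = label
        self.tasks = tasks

    def run(self):
        while True:
            batch = self.tasks.get()
            if batch is None:
                self.tasks.task_done()
                break
            try:
                print(self.label, 'enqueuing batch of', len(batch))  # stand-in for the SQS call
            finally:
                self.tasks.task_done()

if __name__ == '__main__':
    tasks = JoinableQueue(8)          # bounded: put() blocks once 8 batches are waiting
    workers = [Worker('my-queue', tasks) for _ in range(4)]
    for w in workers:
        w.start()
    for _ in range(20):
        tasks.put(['message'] * 10)   # SQS accepts at most 10 messages per batch
    for _ in workers:
        tasks.put(None)
    tasks.join()
    for w in workers:
        w.join()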
github ColumbiaDVMM / ColumbiaImageSearch / cufacesearch / cufacesearch / updater / extraction_processor.py
def init_queues(self):
    """Initialize queues list ``self.q_in`` and ``self.q_out``
    """
    from multiprocessing import JoinableQueue
    self.q_in = []
    self.q_out = []
    for _ in range(self.nb_threads):
      self.q_in.append(JoinableQueue(0))
      self.q_out.append(JoinableQueue(0))
github kvfrans / parallel-trpo / rollouts.py
def __init__(self, args):
        self.args = args

        self.tasks = multiprocessing.JoinableQueue()
        self.results = multiprocessing.Queue()

        self.actors = []
        self.actors.append(Actor(self.args, self.tasks, self.results, 9999, args.monitor))

        for i in xrange(self.args.num_threads-1):
            self.actors.append(Actor(self.args, self.tasks, self.results, 37*(i+3), False))

        for a in self.actors:
            a.start()

        # we will start by running 20,000 / 1000 = 20 episodes for the first iteration

        self.average_timesteps_in_episode = 1000
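Pairing a JoinableQueue for tasks with a plain Queue for results, as above, lets the parent call tasks.join() and then drain exactly as many results as it submitted. A hedged, self-contained sketch of that pairing follows; the actor body is a stand-in for running an episode.

import multiprocessing

def actor(tasks, results):
    # Pull a task, do some work, report back on the plain results queue.
    while True:
        task = tasks.get()
        if task is None:
            tasks.task_done()
            break
        results.put(task ** 2)         # stand-in for running an episode
        tasks.task_done()

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    actors = [multiprocessing.Process(target=actor, args=(tasks, results))
              for _ in range(3)]
    for a in actors:
        a.start()

    n_tasks = 12
    for i in range(n_tasks):
        tasks.put(i)
    tasks.join()                       # every task has been acknowledged
    rollouts = [results.get() for _ in range(n_tasks)]
    print(len(rollouts), 'results collected')

    for _ in actors:
        tasks.put(None)                # shut the actors down
    for a in actors:
        a.join()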
github WZBSocialScienceCenter / tmtoolkit / tmtoolkit / preprocess / _tmpreproc.py
        self.results_queue = mp.Queue()
        self.workers = []
        self.docs2workers = {}

        common_kwargs = dict(tokenizer=self.tokenizer,
                             stemmer=self.stemmer,
                             lemmatizer=self.lemmatizer,
                             pos_tagger=self.pos_tagger)

        if initial_states is not None:
            if docs is not None:
                raise ValueError('`docs` must be None when loading from initial states')
            logger.info('setting up %d worker processes with initial states' % len(initial_states))

            for i_worker, w_state in enumerate(initial_states):
                task_q = mp.JoinableQueue()

                w = PreprocWorker(i_worker, self.language, task_q, self.results_queue,
                                  name='_PreprocWorker#%d' % i_worker, **common_kwargs)
                w.start()

                task_q.put(('set_state', w_state))

                self.workers.append(w)
                for dl in w_state['_doc_labels']:
                    self.docs2workers[dl] = i_worker
                self.tasks_queues.append(task_q)

            [q.join() for q in self.tasks_queues]

        else:
            if docs is None:
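Here each worker owns its own JoinableQueue, commands travel as (name, payload) tuples, and [q.join() for q in self.tasks_queues] acts as a barrier until every worker has applied its command. A simplified sketch of that per-worker-queue arrangement follows; the worker body and payloads are illustrative, not tmtoolkit's actual PreprocWorker.

import multiprocessing as mp

def worker(worker_id, task_q, results_q):
    # Dispatch on (command, payload) tuples; each worker owns its task queue.
    while True:
        task = task_q.get()
        if task is None:
            task_q.task_done()
            break
        command, payload = task
        if command == 'set_state':
            results_q.put((worker_id, 'state restored', sorted(payload)))
        task_q.task_done()

if __name__ == '__main__':
    results_q = mp.Queue()
    task_queues, workers = [], []
    for i in range(2):
        task_q = mp.JoinableQueue()
        w = mp.Process(target=worker, args=(i, task_q, results_q))
        w.start()
        task_q.put(('set_state', {'_doc_labels': ['doc_%d' % i]}))
        task_queues.append(task_q)
        workers.append(w)

    [q.join() for q in task_queues]    # same barrier as in the excerpt above

    for _ in range(2):
        print(results_q.get())
    for q in task_queues:
        q.put(None)
    for w in workers:
        w.join()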
github mobiusklein / ms_deisotope / ms_deisotope / tools / deisotoper / scan_generator.py
def _initialize_workers(self, start_scan=None, end_scan=None, max_scans=None):
        try:
            self._input_queue = JoinableQueue(int(1e6))
            self._output_queue = JoinableQueue(int(1e6))
        except OSError:
            # Not all platforms permit limiting the size of queues
            self._input_queue = JoinableQueue()
            self._output_queue = JoinableQueue()

        self._preindex_file()

        if self.extract_only_tandem_envelopes:
            self.log("Constructing Scan Interval Tree")
            self._make_interval_tree(start_scan, end_scan)

        self._terminate()
        self._scan_yielder_process = ScanIDYieldingProcess(
            self.ms_file, self._input_queue, start_scan=start_scan, end_scan=end_scan,
            max_scans=max_scans, no_more_event=self.scan_ids_exhausted_event,
            ignore_tandem_scans=self.ignore_tandem_scans, batch_size=1)
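The try/except exists because on some platforms the semaphore that backs a bounded queue has a fairly small maximum value, so JoinableQueue(int(1e6)) can raise OSError there. Below is a small sketch of the same fallback wrapped in a helper; the helper name is made up.

from multiprocessing import JoinableQueue

def make_joinable_queue(maxsize):
    # Return a bounded JoinableQueue, falling back to unbounded where the
    # platform rejects a large maxsize.
    try:
        return JoinableQueue(maxsize)
    except OSError:
        return JoinableQueue()

if __name__ == '__main__':
    input_queue = make_joinable_queue(int(1e6))
    output_queue = make_joinable_queue(int(1e6))
    print(type(input_queue).__name__, type(output_queue).__name__)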