How to use the multiprocessing.Queue function in multiprocessing

To help you get started, we’ve selected a few multiprocessing.Queue examples, based on popular ways it is used in public projects.

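Before digging into the project excerpts, here is a minimal, self-contained sketch of the basic pattern behind all of them: the parent process puts work onto one multiprocessing.Queue, a child process consumes it and pushes results back on a second queue. All names below are illustrative and not taken from any of the projects that follow.

import multiprocessing

def square_worker(in_queue, out_queue):
    # Consume items until the None sentinel arrives, then exit.
    while True:
        item = in_queue.get()
        if item is None:
            break
        out_queue.put(item * item)

if __name__ == '__main__':
    in_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=square_worker,
                                     args=(in_queue, out_queue))
    worker.start()
    for n in range(5):
        in_queue.put(n)
    in_queue.put(None)                        # sentinel: no more work
    results = [out_queue.get() for _ in range(5)]
    worker.join()
    print(results)                            # [0, 1, 4, 9, 16]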

github lanl / scout / test / dosep.py (View on GitHub)
    @param num_threads the number of worker processes to use.

    @param test_work_items the iterable of test work item tuples
    to run.
    """

    # Initialize our global state.
    initialize_global_vars_multiprocessing(num_threads, test_work_items)

    # Create jobs.
    job_queue = multiprocessing.Queue(len(test_work_items))
    for test_work_item in test_work_items:
        job_queue.put(test_work_item)

    result_queue = multiprocessing.Queue(len(test_work_items))

    # Create queues for started child pids.  Terminating
    # the multiprocess processes does not terminate the
    # child processes they spawn.  We can remove this tracking
    # if/when we move to having the multiprocess process directly
    # perform the test logic.  The Queue size needs to be able to
    # hold 2 * (num inferior dotest.py processes started) entries.
    inferior_pid_events = multiprocessing.Queue(4096)

    # Worker dictionary allows each worker to figure out its worker index.
    manager = multiprocessing.Manager()
    worker_index_map = manager.dict()

    # Create workers.  We don't use multiprocessing.Pool due to
    # challenges with handling ^C keyboard interrupts.
    workers = []
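
The excerpt stops just before the worker processes are created, but the shape of the pattern is already clear: a job queue and a result queue both sized to the number of work items, and a Manager().dict() through which each worker can look up its own index. Below is a rough, runnable sketch of how such workers might be wired up; run_job and the worker names are placeholders, not the project's real helpers.

import multiprocessing
import queue

def run_job(job):
    # Placeholder for the real per-item test logic.
    return ('done', job)

def worker_loop(name, job_queue, result_queue, worker_index_map):
    # Each worker finds its index in the shared Manager dict.
    index = worker_index_map[name]
    while True:
        try:
            job = job_queue.get(timeout=1)
        except queue.Empty:
            break                              # queue drained; this worker is done
        result_queue.put((index, run_job(job)))

if __name__ == '__main__':
    work_items = ['a', 'b', 'c', 'd']
    job_queue = multiprocessing.Queue(len(work_items))
    result_queue = multiprocessing.Queue(len(work_items))
    for item in work_items:
        job_queue.put(item)

    manager = multiprocessing.Manager()
    worker_index_map = manager.dict()

    workers = []
    for i in range(2):
        name = 'worker-%d' % i
        worker_index_map[name] = i
        workers.append(multiprocessing.Process(
            target=worker_loop,
            args=(name, job_queue, result_queue, worker_index_map)))
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print([result_queue.get() for _ in range(len(work_items))])
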
github syncpy / SyncPy / src / ui / MethodWidget.py (View on GitHub)
def compute(self, signals, outputBasename):
        try:
            dictionary = self.getArgumentsAsDictionary()
            self.computeProcess = self.currentMethod(**dictionary)

        except Exception, e:
            print >> sys.stderr, "Error: "+str(e)
            self.computationFinished.emit()
            return

        self.computeProcess.errorRaised = False
        self.computeProcess.setOutputFilename(outputBasename)
        self.computeProcess.resQueue = multiprocessing.Queue(0)

        self.methodResults = {}
        self.computationInterrupted = False

        self.computeProcess.start(signals, self.computeProcess.resQueue)
        self.computeCheckTimer.start(1000)
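
This excerpt is Python 2 (note except Exception, e and print >>) and sits inside a GUI widget: an unbounded result queue (maxsize 0) is handed to the compute process, and the 1-second computeCheckTimer presumably polls it for results. A rough Python 3 sketch of the same queue usage without the GUI parts, with invented names:

import multiprocessing
import queue

def compute(signals, res_queue):
    # Stand-in for the real computation; push the result back to the parent.
    res_queue.put(sum(signals))

if __name__ == '__main__':
    res_queue = multiprocessing.Queue(0)       # maxsize 0 means unbounded
    proc = multiprocessing.Process(target=compute, args=([1, 2, 3], res_queue))
    proc.start()
    # Poll with a short timeout instead of blocking forever, roughly what
    # the 1-second check timer above does.
    while True:
        try:
            result = res_queue.get(timeout=1)
            break
        except queue.Empty:
            continue                           # nothing yet; check again
    proc.join()
    print(result)                              # 6
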
github openstack / swift / swift / stats / log_processor.py (View on GitHub)
def multiprocess_collate(processor_args, logs_to_process, worker_count):
    '''
    yield hourly data from logs_to_process
    Every item that this function yields will be added to the processed files
    list.
    '''
    results = []
    in_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    for _junk in range(worker_count):
        p = multiprocessing.Process(target=collate_worker,
                                    args=(processor_args,
                                          in_queue,
                                          out_queue))
        p.start()
        results.append(p)
    for x in logs_to_process:
        in_queue.put(x)
    for _junk in range(worker_count):
        in_queue.put(None)  # tell the worker to end
    while True:
        try:
            item, data = out_queue.get_nowait()
        except Queue.Empty:
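
The excerpt is cut off inside the collection loop, but the overall shape is a classic fan-out/fan-in: worker_count processes read from in_queue, each worker receives one None sentinel telling it to stop, and the parent drains out_queue with a non-blocking get_nowait() guarded by Queue.Empty (the Python 2 Queue module; queue.Empty in Python 3). A condensed, runnable sketch with a trivial stand-in for collate_worker:

import multiprocessing
import queue

def collate_worker(in_queue, out_queue):
    while True:
        item = in_queue.get()
        if item is None:                # sentinel: no more logs to process
            break
        out_queue.put((item, item.upper()))

if __name__ == '__main__':
    worker_count = 2
    in_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=collate_worker,
                                       args=(in_queue, out_queue))
               for _ in range(worker_count)]
    for w in workers:
        w.start()
    logs = ['a', 'b', 'c']
    for log in logs:
        in_queue.put(log)
    for _ in range(worker_count):
        in_queue.put(None)              # one sentinel per worker
    collected = 0
    while collected < len(logs):
        try:
            item, data = out_queue.get_nowait()
        except queue.Empty:
            continue                    # workers still busy; poll again
        collected += 1
        print(item, data)
    for w in workers:
        w.join()
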
github rafariva / ANPR-Tensorflow / anpr / train.py (View on GitHub)
def wrapped(*args, **kwargs):
        q = multiprocessing.Queue(3)
        proc = multiprocessing.Process(target=main,
                                       args=(q, args, kwargs))
        proc.start()
        try:
            while True:
                item = q.get()
                yield item
        finally:
            proc.terminate()
            proc.join()
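
Two details carry this generator: the bounded Queue(3) provides back-pressure, so the producer process never runs more than three items ahead of whoever consumes the generator, and the finally block tears the producer down as soon as the generator is closed. A stripped-down, runnable version of the same wrapper; the dummy produce function stands in for the project's main target.

import multiprocessing

def produce(q, args, kwargs):
    # Stand-in producer: q.put blocks once three items are waiting,
    # which is exactly the back-pressure the bounded queue provides.
    i = 0
    while True:
        q.put(i)
        i += 1

def wrapped(*args, **kwargs):
    q = multiprocessing.Queue(3)          # at most 3 unconsumed items
    proc = multiprocessing.Process(target=produce, args=(q, args, kwargs))
    proc.start()
    try:
        while True:
            yield q.get()
    finally:
        proc.terminate()                  # stop the producer when the consumer stops
        proc.join()

if __name__ == '__main__':
    gen = wrapped()
    print([next(gen) for _ in range(5)])  # [0, 1, 2, 3, 4]
    gen.close()                           # triggers the finally block
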
github lancopku / pkuseg-python / pkuseg / trainer.py (View on GitHub)
def _decode_multi_proc(self, testset: DataSet, model: Model):
        in_queue = Queue()
        out_queue = Queue()
        procs = []
        nthread = self.config.nThread
        for i in range(nthread):
            p = Process(
                target=self._decode_proc, args=(model, in_queue, out_queue)
            )
            procs.append(p)

        for idx, example in enumerate(testset):
            in_queue.put((idx, example.features))

        for proc in procs:
            in_queue.put(None)
            proc.start()
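
The excerpt ends before any results come back, but the important details are already visible: every example is tagged with its index before going into in_queue, so the parent can restore input order after out-of-order completion, and each worker receives exactly one None sentinel. A small self-contained sketch of the full round trip; decode_proc is a trivial stand-in for the project's _decode_proc.

import multiprocessing

def decode_proc(in_queue, out_queue):
    while True:
        item = in_queue.get()
        if item is None:                     # sentinel: stop this worker
            break
        idx, features = item
        out_queue.put((idx, [f.upper() for f in features]))

if __name__ == '__main__':
    examples = [['a'], ['b', 'c'], ['d']]
    in_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=decode_proc,
                                     args=(in_queue, out_queue))
             for _ in range(2)]
    for idx, features in enumerate(examples):
        in_queue.put((idx, features))
    for p in procs:
        in_queue.put(None)                   # one sentinel per worker, as above
        p.start()
    results = [None] * len(examples)
    for _ in range(len(examples)):
        idx, decoded = out_queue.get()       # arrives in completion order
        results[idx] = decoded               # ...and is slotted back into input order
    for p in procs:
        p.join()
    print(results)
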
github balsam-alcf / balsam / balsam / management / commands / balsam_service.py (View on GitHub)
logger.exception(' Received Exception while trying to start job receiver: ' + str(e))
         
         # Balsam status message sender
         status_sender = BalsamStatusSender.BalsamStatusSender(settings.SENDER_CONFIG)

         # setup timer for cleaning the work folder of old files
         logger.debug('creating DirCleaner')
         workDirCleaner = DirCleaner.DirCleaner(settings.BALSAM_WORK_DIRECTORY,
                                     settings.BALSAM_DELETE_OLD_WORK_PERIOD,
                                     settings.BALSAM_DELETE_OLD_WORK_AGE,
                                    )

         # create the balsam service queue which subprocesses use to communicate
         # back to the service. It is also used to wake up the while-loop
         logger.debug('creating balsam_service_queue')
         balsam_service_queue = multiprocessing.Queue()
         jobs_in_transition_by_id = {}

         # this is the loop that never ends, yes it goes on and on my friends...
         while True:
            logger.debug('begin service loop ')


            # loop over queued jobs and check their status
            # also look for jobs that have been submitted but are not in the queued or running state, which 
            # may mean they have finished or exited.
            logger.debug( ' checking for active jobs ')
            active_jobs = models.BalsamJob.objects.filter(state__in = models.CHECK_STATUS_STATES)
            if len(active_jobs) > 0:
               logger.info( 'monitoring ' + str(len(active_jobs)) + ' active jobs')
            else:
               logger.debug(' no active jobs')
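
As the comment above says, balsam_service_queue does double duty: subprocesses push messages back to the service, and the queue also wakes the while-loop. A minimal sketch of that idea with invented message contents; a blocking get() with a timeout gives the loop a heartbeat and lets it wake early whenever a subprocess reports something.

import multiprocessing
import queue

def job_runner(service_queue):
    # A subprocess reporting a state transition back to the service loop.
    service_queue.put(('job-42', 'RUNNING'))

if __name__ == '__main__':
    balsam_service_queue = multiprocessing.Queue()
    runner = multiprocessing.Process(target=job_runner,
                                     args=(balsam_service_queue,))
    runner.start()
    for _ in range(3):                        # a few turns of the service loop
        try:
            job_id, state = balsam_service_queue.get(timeout=1)
            print('transition received:', job_id, state)
        except queue.Empty:
            print('no messages; doing periodic housekeeping')
    runner.join()
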
github mit-ll / LO-PHI / lophi-automation / analysis_scripts / malware.py (View on GitHub)
timestamps.append((boot_start,boot_stop))
            
            # Wait a bit before doing stuff (allow the OS to finish booting)
            print "* %s: Timestamp: %f"%(self.machine.config.name, time.time())
            print "* %s: Waiting for the OS to stabilize" % \
                  self.machine.config.name
            osstable_start = time.time()
            time.sleep(self.OS_BOOT_WAIT)
            osstable_stop = time.time()
            timestamps.append((osstable_start,osstable_stop))
                 
            # Start our disk capturing
#             print "* %s: Timestamp: %f"%(self.machine.config.name,time.time())
            print "* %s: Starting disk capture..."%self.machine.config.name
            disk_pcap_file = os.path.join(tmp_dir,"sut_disk_io.dcap")
            disk_queue = multiprocessing.Queue()
            dcap_writer = CaptureWriter(disk_pcap_file,
                                     disk_queue)
            disk_tap = DiskCaptureEngine(machine,
                                         disk_queue)
            # Start disk capture
            disk_tap.start()
            dcap_writer.start()
            
            # Send keypresses to download binary   
            print "* %s: Timestamp: %f"%(self.machine.config.name,time.time())                  
            print "* %s: Sending keypresses..."%self.machine.config.name
            
            # Get our keypress generator
            kpg = machine.keypress_get_generator()
            
            # Check ftp info, and send commands to execute malware
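
The disk-capture portion of this excerpt is a producer/consumer pair: DiskCaptureEngine pushes captured disk I/O onto disk_queue, and CaptureWriter drains the queue into the .dcap file, so slow writes never stall the capture. Those are LO-PHI classes; the stand-ins below are invented, but they show the same queue topology.

import multiprocessing

def capture_engine(q):
    # Invented producer: emit a few fake disk I/O records, then a sentinel.
    for sector in range(5):
        q.put('read sector %d' % sector)
    q.put(None)

def capture_writer(path, q):
    # Invented consumer: drain the queue into a file until the sentinel arrives.
    with open(path, 'w') as f:
        while True:
            record = q.get()
            if record is None:
                break
            f.write(record + '\n')

if __name__ == '__main__':
    disk_queue = multiprocessing.Queue()
    writer = multiprocessing.Process(target=capture_writer,
                                     args=('sut_disk_io.txt', disk_queue))
    tap = multiprocessing.Process(target=capture_engine, args=(disk_queue,))
    writer.start()
    tap.start()
    tap.join()
    writer.join()
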
github apache / incubator-retired-slider / slider-agent / src / main / python / resource_management / core / shell.py (View on GitHub)
pidfile.close()

    ## wait poll_after seconds and poll
    if poll_after:
      time.sleep(poll_after)
      if proc.poll() is None:
        return None, None #if still running then return
      else:
        logAnyway = True #assume failure and log
        Logger.warning("Process is not up after the polling interval " + str(poll_after) + " seconds.")
    else:
      return None, None


  if timeout:
    q = Queue()
    t = threading.Timer( timeout, on_timeout, [proc, q] )
    t.start()
    
  #out = proc.communicate()[0].strip('\n')
  out = proc.communicate()
  
  if timeout:
    if q.empty():
      t.cancel()
    # timeout occurred
    else:
      raise ExecuteTimeoutException()
   
  code = proc.returncode
  
  if (logoutput or logAnyway) and out:
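
Here the queue carries no data at all; it is a one-shot flag. The on_timeout callback (not shown in the excerpt) presumably kills the process and puts something on q, so an empty queue after communicate() returns means the command finished in time and the timer can be cancelled. A compact sketch of the same timeout trick, with an invented on_timeout and a Unix sleep command standing in for the real child process:

import subprocess
import threading
from multiprocessing import Queue

def on_timeout(proc, q):
    # Invented stand-in for the excerpt's on_timeout: flag the timeout, kill the child.
    q.put(True)
    proc.terminate()

if __name__ == '__main__':
    proc = subprocess.Popen(['sleep', '5'])
    q = Queue()
    t = threading.Timer(1.0, on_timeout, [proc, q])
    t.start()
    proc.communicate()
    if q.empty():
        t.cancel()                 # finished before the timer fired
        print('command completed')
    else:
        print('command timed out')
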
github ldv-klever / klever / Psi / psi / vtsc / __init__.py (View on GitHub)
def before_launch_all_components(context):
    context.mqs['VTSC common prj attrs'] = multiprocessing.Queue()
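
This hook only allocates a queue and stores it under a name in context.mqs, so that other components can later look the same queue up by key. A tiny illustration of that idea with an invented Context class:

import multiprocessing

class Context:
    def __init__(self):
        self.mqs = {}                # name -> multiprocessing.Queue

def before_launch_all_components(context):
    context.mqs['common prj attrs'] = multiprocessing.Queue()

if __name__ == '__main__':
    context = Context()
    before_launch_all_components(context)
    context.mqs['common prj attrs'].put({'project': 'demo'})
    print(context.mqs['common prj attrs'].get())
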
github luoyetx / Joint-Face-Detection-and-Alignment / jfda / prepare.py (View on GitHub)
def gen(data, db_name):
    remove_if_exists(db_name)
    logger.info('fill queues')
    q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
    q_out = multiprocessing.Queue(1024)
    fill_queues(data, q_in)
    readers = [multiprocessing.Process(target=celeba_reader_func, args=(q_in[i], q_out)) \
               for i in range(cfg.WORKER_N)]
    for p in readers:
      p.start()
    writer = multiprocessing.Process(target=celeba_writer_func, args=(q_out, db_name))
    writer.start()
    for p in readers:
      p.join()
    q_out.put(('finish', []))
    writer.join()
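
This last example splits the input side as well: each reader gets its own in-queue, so there is no contention on a single shared queue, while the one bounded q_out (1024 entries) throttles the readers if the single database writer falls behind. The ('finish', []) message is the writer's stop signal, sent only after every reader has joined. A reduced sketch of the same topology with trivial stand-ins for celeba_reader_func and celeba_writer_func:

import multiprocessing

WORKER_N = 2

def reader_func(q_in, q_out):
    while True:
        item = q_in.get()
        if item is None:                     # sentinel: this reader is done
            break
        q_out.put(('item', item * 10))

def writer_func(q_out, db_name):
    # Drain results until the ('finish', []) message arrives.
    with open(db_name, 'w') as db:
        while True:
            kind, payload = q_out.get()
            if kind == 'finish':
                break
            db.write('%r\n' % payload)

if __name__ == '__main__':
    data = list(range(10))
    q_in = [multiprocessing.Queue() for _ in range(WORKER_N)]
    q_out = multiprocessing.Queue(1024)      # bounded: throttles readers if the writer lags
    for i, item in enumerate(data):
        q_in[i % WORKER_N].put(item)         # round-robin fill
    for q in q_in:
        q.put(None)
    readers = [multiprocessing.Process(target=reader_func, args=(q_in[i], q_out))
               for i in range(WORKER_N)]
    writer = multiprocessing.Process(target=writer_func, args=(q_out, 'db.txt'))
    for p in readers:
        p.start()
    writer.start()
    for p in readers:
        p.join()
    q_out.put(('finish', []))
    writer.join()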