How to use the multiprocessing.Process class in multiprocessing

To help you get started, we’ve selected a few multiprocessing.Process examples, based on popular ways it is used in public projects.
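
Before the project snippets, here is a minimal, self-contained sketch of the core pattern they all share; the `greet` function and its argument are invented for illustration.

import multiprocessing

def greet(name):
    print("hello from the child process,", name)

if __name__ == "__main__":
    p = multiprocessing.Process(target=greet, args=("world",))
    p.start()   # run greet("world") in a separate process
    p.join()    # wait for the child to finish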


github jarus / flask-testing / flask_testing / utils.py
            # the port out of Flask once we call `run`.
            original_socket_bind = socketserver.TCPServer.server_bind
            def socket_bind_wrapper(self):
                ret = original_socket_bind(self)

                # Get the port and save it into the port_value, so the parent process
                # can read it.
                (_, port) = self.socket.getsockname()
                port_value.value = port
                socketserver.TCPServer.server_bind = original_socket_bind
                return ret

            socketserver.TCPServer.server_bind = socket_bind_wrapper
            app.run(port=port, use_reloader=False)

        self._process = multiprocessing.Process(
            target=worker, args=(self.app, self._configured_port)
        )

        self._process.start()

        # We must wait for the server to start listening, but give up
        # after a specified maximum timeout
        timeout = self.app.config.get('LIVESERVER_TIMEOUT', 5)
        start_time = time.time()

        while True:
            elapsed_time = (time.time() - start_time)
            if elapsed_time > timeout:
                raise RuntimeError(
                    "Failed to start the server after %d seconds. " % timeout
                )
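
This snippet combines two reusable ideas: the child publishes the bound port through shared memory (a multiprocessing.Value), and the parent polls for it with a deadline. Below is a stripped-down, runnable sketch of just that handshake, with an invented `worker` standing in for the Flask server.

import multiprocessing
import time

def worker(port_value):
    port_value.value = 5000  # pretend the server just bound to port 5000

if __name__ == "__main__":
    port_value = multiprocessing.Value("i", 0)
    process = multiprocessing.Process(target=worker, args=(port_value,))
    process.start()

    timeout = 5
    start_time = time.time()
    while port_value.value == 0:  # 0 means "not bound yet"
        if time.time() - start_time > timeout:
            raise RuntimeError("Failed to start the server after %d seconds." % timeout)
        time.sleep(0.05)
    print("server is listening on port", port_value.value)
    process.join()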
github Chudry / Xerror / namp_scann.py
    more_loop = True
    while more_loop:
        time.sleep(5)

        for i in range(0, number_of_processes):
            if processes[i].is_alive():
                processes[i].join(1)
                print("jobs is not finished",processes[i])
            else:
                if index >= len(targets):
                    for p in processes:
                        p.join()
                    more_loop = False
                    break
                processes[i] = multiprocessing.Process(target=runScan,args=(targets[index],))
                processes[i].start()
                index+=1
   
    # os.popen('google-chrome ' + files[0])  # auto-open Chrome for each report as it finishes
    print("Pool completed execution!!!")
    print("Exiting main thread.")
    exit(0)
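
The slot-recycling loop above hand-rolls what multiprocessing.Pool provides out of the box: a fixed number of workers pulled through a list of targets. A sketch under that assumption, with `run_scan` and `targets` as placeholders for the original `runScan` and its inputs:

import multiprocessing

def run_scan(target):
    print("scanning", target)  # stand-in for the real scan

if __name__ == "__main__":
    targets = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
    with multiprocessing.Pool(processes=2) as pool:
        pool.map(run_scan, targets)  # blocks until every target is scanned
    print("Pool completed execution!!!")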
github itdaniher / rtlsdr-rds-demod / streamer.py
def spawner(future_yielder):
    def loopwrapper(main):
        loop = asyncio.get_event_loop()
        asyncio.ensure_future(main())
        loop.run_forever()
    multiprocessing.Process(target=loopwrapper, args=(future_yielder,)).start()
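
The pattern here is to dedicate a child process to an asyncio event loop. A runnable sketch of the same shape follows; it substitutes `asyncio.run` for `ensure_future` plus `run_forever` so the example actually terminates, and `ticker` is an invented coroutine:

import asyncio
import multiprocessing

async def ticker():
    for i in range(3):
        print("tick", i)
        await asyncio.sleep(0.1)

def loopwrapper(main):
    asyncio.run(main())  # start a fresh event loop in the child and run main()

if __name__ == "__main__":
    p = multiprocessing.Process(target=loopwrapper, args=(ticker,))
    p.start()
    p.join()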
github rancher / cattle / pyagent / dstack / agent / event.py
    def _start_children(self):
        pid = os.getpid()
        for i in range(self._workers):
            p = Process(target=_worker, args=(self._queue, pid))
            p.start()
            self._children.append(p)
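
A self-contained sketch of this fan-out: several children consume from one shared queue and are told to stop with a sentinel. The `_worker` below is a stand-in for the real one, not the original code.

import multiprocessing
import os

def _worker(queue, parent_pid):
    for item in iter(queue.get, None):  # None is the shutdown sentinel
        print("worker %d (parent %d) got %r" % (os.getpid(), parent_pid, item))

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    children = []
    for _ in range(2):
        p = multiprocessing.Process(target=_worker, args=(queue, os.getpid()))
        p.start()
        children.append(p)
    for item in ["a", "b", "c"]:
        queue.put(item)
    for _ in children:
        queue.put(None)  # one sentinel per child
    for p in children:
        p.join()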
github ComparativeGenomicsToolkit / cactus / src / cactus / pipeline / DBserverControl.py
    except:
        logger.warning("Can't find which ports are occupied--likely netstat is not installed."
                       " Choosing a random port to start the DB on, good luck!")
        port = random.randint(1025, _MAX_DBSERVER_PORT)
    dbElem.setDbPort(port)

    process = DBServerProcess(dbElem, logPath, fileStore, existingSnapshotID, snapshotExportID)
    process.daemon = True
    process.start()

    if not blockUntilDBserverIsRunning(dbElem):
        raise RuntimeError("Unable to launch DBserver in time.")

    return process, dbElem, logPath

class DBServerProcess(Process):
    """Independent process that babysits the DBserver process.

    Waits for the TERMINATE flag to be set, then kills the DB and
    copies the final snapshot to snapshotExportID.
    """
    exceptionMsg = Queue()

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        super(DBServerProcess, self).__init__()

    def run(self):
        """Run the tryRun method, signaling the main thread if an exception occurs."""
        try:
            self.tryRun(*self.args, **self.kwargs)
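
The subclassing idiom in DBServerProcess — stash the constructor arguments, call the parent `__init__`, and have `run()` funnel exceptions into a queue the parent can inspect — works in isolation too. A hedged sketch with invented class and method names:

import multiprocessing

class SupervisedProcess(multiprocessing.Process):
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.exception_queue = multiprocessing.Queue()
        super(SupervisedProcess, self).__init__()

    def run(self):
        # Run the real work, signaling the parent if an exception occurs.
        try:
            self.try_run(*self.args, **self.kwargs)
        except Exception as exc:
            self.exception_queue.put(repr(exc))

    def try_run(self, should_fail):
        if should_fail:
            raise RuntimeError("DB server died")

if __name__ == "__main__":
    p = SupervisedProcess(True)
    p.start()
    p.join()
    if not p.exception_queue.empty():
        print("child reported:", p.exception_queue.get())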
github maxpumperla / deep_learning_and_the_game_of_go / code / play_train_eval_q.py
def train_on_experience(learning_agent, output_file, experience_file,
                        lr, batch_size):
    # Do the training in a background process. Otherwise some Keras
    # state gets initialized in the parent, is inherited when the parent
    # later forks, and interferes with the workers.
    worker = multiprocessing.Process(
        target=train_worker,
        args=(
            learning_agent,
            output_file,
            experience_file,
            lr,
            batch_size
        )
    )
    worker.start()
    worker.join()
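
The start-then-immediately-join shape is deliberate: the child does the work while the parent blocks, so heavy framework state never touches the parent. A trivial runnable sketch with a stand-in `train_worker`:

import multiprocessing

def train_worker(output_file, lr, batch_size):
    # Heavy framework initialization would happen here, isolated in the child.
    print("training with lr=%s, batch_size=%s -> %s" % (lr, batch_size, output_file))

if __name__ == "__main__":
    worker = multiprocessing.Process(
        target=train_worker,
        args=("model.h5", 0.01, 128),
    )
    worker.start()
    worker.join()  # block until the child finishes; the parent stays clean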
github binarydud / pyres / pyres / horde.py
    def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None):
        multiprocessing.Process.__init__(self, name='Minion')

        #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s'
        #logHandler = logging.StreamHandler()
        #logHandler.setFormatter(logging.Formatter(format))
        #self.logger = multiprocessing.get_logger()
        #self.logger.addHandler(logHandler)
        #self.logger.setLevel(logging.DEBUG)

        self.queues = queues
        self._shutdown = False
        self.hostname = os.uname()[1]
        self.server = server
        self.password = password

        self.log_level = log_level
        self.log_path = log_path
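
A cut-down, runnable version of the same constructor pattern; note that a subclass must call multiprocessing.Process.__init__ before the instance can be started. Field names follow the snippet, but the behavior of `run()` is invented:

import logging
import multiprocessing
import socket

class Minion(multiprocessing.Process):
    def __init__(self, queues, log_level=logging.INFO):
        multiprocessing.Process.__init__(self, name='Minion')
        self.queues = queues
        self._shutdown = False
        self.hostname = socket.gethostname()  # portable stand-in for os.uname()[1]
        self.log_level = log_level

    def run(self):
        print(self.name, "on", self.hostname, "watching queues:", self.queues)

if __name__ == "__main__":
    minion = Minion(['default'])
    minion.start()
    minion.join()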
github ncullen93 / Unet-ants / code / sampling / dataloader.py
        self.done_event = threading.Event()

        self.samples_remaining = len(self.sampler)
        self.sample_iter = iter(self.sampler)

        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop,
                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
                for _ in range(self.num_workers)]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
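
A stripped-down sketch of that loader machinery: daemon workers pull indices from one SimpleQueue, push results onto another, and stop on a None sentinel. The squaring below stands in for real batch loading:

import multiprocessing

def _worker_loop(index_queue, data_queue):
    while True:
        idx = index_queue.get()
        if idx is None:   # sentinel: no more work
            break
        data_queue.put((idx, idx * idx))  # stand-in for collating a batch

if __name__ == "__main__":
    index_queue = multiprocessing.SimpleQueue()
    data_queue = multiprocessing.SimpleQueue()
    workers = [
        multiprocessing.Process(target=_worker_loop,
                                args=(index_queue, data_queue))
        for _ in range(2)
    ]
    for w in workers:
        w.daemon = True  # ensure that the worker exits on process exit
        w.start()
    for i in range(4):
        index_queue.put(i)
    results = [data_queue.get() for _ in range(4)]
    for _ in workers:
        index_queue.put(None)
    print(sorted(results))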
github jpirko / lnst / lnst / Common / NetTestCommand.py
    def run(self):
        self._start_time = time()
        if isinstance(self._cmd_cls, NetTestCommandControl) or \
           isinstance(self._cmd_cls, NetTestCommandConfig):
            return self._cmd_cls.run()

        self._read_pipe, self._write_pipe = multiprocessing.Pipe()
        self._process = multiprocessing.Process(target=self._run)

        self._process.daemon = False
        self._process.start()
        self._pid = self._process.pid

        self._connection_pipe = self._read_pipe

        if not self._id:
            logging.debug("Running command with"
                          " pid \"%d\"" % (self._pid))
            return None
        else:
            logging.debug("Running in background with"
                          " bg_id \"%s\" pid \"%d\"" % (self._id, self._pid))
            return {"passed": True,
                    "res_header": self._cmd_cls._format_cmd_res_header(),
github EdinburghNLP / nematus / session1 / translate.py
        word_idict[vv] = kk
    word_idict[0] = ''
    word_idict[1] = 'UNK'
    with open(dictionary_target, 'rb') as f:
        word_dict_trg = pkl.load(f)
    word_idict_trg = dict()
    for kk, vv in word_dict_trg.iteritems():
        word_idict_trg[vv] = kk
    word_idict_trg[0] = ''
    word_idict_trg[1] = 'UNK'

    queue = Queue()
    rqueue = Queue()
    processes = [None] * n_process
    for midx in xrange(n_process):
        processes[midx] = Process(target=translate_model, 
                                  args=(queue,rqueue,midx,model,options,k,normalize,))
        processes[midx].start()

    def _seqs2words(caps):
        capsw = []
        for cc in caps:
            ww = []
            for w in cc:
                if w == 0:
                    break
                ww.append(word_idict_trg[w])
            capsw.append(' '.join(ww))
        return capsw

    def _send_jobs(fname):
        with open(fname, 'r') as f:
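
This snippet is Python 2 (`iteritems`, `xrange`), but the process layout translates directly: n workers consume jobs from one queue and push results onto a second. A Python 3 sketch, with a trivial stand-in for `translate_model`:

import multiprocessing

def translate_model(queue, rqueue, midx):
    while True:
        job = queue.get()
        if job is None:     # shutdown sentinel
            break
        idx, seq = job
        rqueue.put((idx, seq.upper()))  # placeholder "translation"

if __name__ == "__main__":
    n_process = 2
    queue = multiprocessing.Queue()
    rqueue = multiprocessing.Queue()
    processes = [
        multiprocessing.Process(target=translate_model,
                                args=(queue, rqueue, midx))
        for midx in range(n_process)
    ]
    for p in processes:
        p.start()
    jobs = ["hello world", "good night"]
    for idx, seq in enumerate(jobs):
        queue.put((idx, seq))
    results = [rqueue.get() for _ in range(len(jobs))]
    for _ in processes:
        queue.put(None)
    for p in processes:
        p.join()
    print(sorted(results))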