How to use the multiprocessing.current_process function in multiprocessing

To help you get started, we’ve selected a few multiprocessing.current_process examples, based on popular ways it is used in public projects.

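Before the project examples, here is a minimal self-contained sketch of the API itself: multiprocessing.current_process() returns the Process object for the calling process, and its name and pid attributes are what most of the snippets below rely on.

import multiprocessing


def worker():
    # In a child process, current_process() describes that child.
    p = multiprocessing.current_process()
    print("worker: name={}, pid={}".format(p.name, p.pid))


if __name__ == "__main__":
    # In the parent, the process is named 'MainProcess' by default.
    print("main:", multiprocessing.current_process().name)
    proc = multiprocessing.Process(target=worker)
    proc.start()
    proc.join()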

github aliyun / aliyun-log-python-sdk / tests / consumer_group_examples / sync_data_to_syslog.py
    endpoint = os.environ.get('SLS_ENDPOINT', '')
    accessKeyId = os.environ.get('SLS_AK_ID', '')
    accessKey = os.environ.get('SLS_AK_KEY', '')
    project = os.environ.get('SLS_PROJECT', '')
    logstore = os.environ.get('SLS_LOGSTORE', '')
    consumer_group = os.environ.get('SLS_CG', '')

    assert endpoint and accessKeyId and accessKey and project and logstore and consumer_group, \
        ValueError("endpoint/access_id/key/project/logstore/consumer_group/name cannot be empty")

    ##########################
    # Some advanced options
    ##########################

    # Don't hard-code the consumer name, especially when you run this program in parallel;
    # deriving it from the pid keeps each consumer unique.
    consumer_name = "{0}-{1}".format(consumer_group, current_process().pid)

    # This option is only used for initialization and is ignored once the consumer group exists and each shard has begun to be consumed.
    # It can be "begin", "end", or a specific time in ISO format; it refers to the log receiving time.
    cursor_start_time = "2019-1-1 0:0:0+8:00"

    # Once a client fails to report to the server for heartbeat_interval * 2, the server considers it offline and re-assigns its task to another consumer.
    # Thus, don't set the heartbeat interval too small when network bandwidth or consumption performance is poor.
    heartbeat_interval = 20

    # If the incoming data is infrequent, don't set this too small (< 1s).
    data_fetch_interval = 1

    # create one consumer in the consumer group
    option = LogHubConfig(endpoint, accessKeyId, accessKey, project, logstore, consumer_group, consumer_name,
                          cursor_position=CursorPosition.SPECIAL_TIMER_CURSOR,
                          cursor_start_time=cursor_start_time,
                          heartbeat_interval=heartbeat_interval,
                          data_fetch_interval=data_fetch_interval)
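
The pid-based consumer name above is what lets several copies of this program share one consumer group. A minimal sketch of that pattern in isolation (start_consumer and the group name are illustrative, not part of the SDK):

from multiprocessing import Process, current_process


def start_consumer(consumer_group):
    # Each process derives a unique consumer name from its own pid.
    consumer_name = "{0}-{1}".format(consumer_group, current_process().pid)
    print("starting consumer:", consumer_name)
    # ... build LogHubConfig with consumer_name and run the consumer ...


if __name__ == "__main__":
    procs = [Process(target=start_consumer, args=("my-group",)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()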
github sentinel-hub / eo-learn / core / eolearn / core / eoexecution.py
def execute_with_mp_lock(execution_function, *args, **kwargs):
    """ A helper utility function that executes a given function with multiprocessing lock if the process is being
    executed in a multi-processing mode

    :param execution_function: A function
    :param args: Function's positional arguments
    :param kwargs: Function's keyword arguments
    :return: Function's results
    """
    if multiprocessing.current_process().name == 'MainProcess' or MULTIPROCESSING_LOCK is None:
        return execution_function(*args, **kwargs)

    with MULTIPROCESSING_LOCK:
        return execution_function(*args, **kwargs)
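
execute_with_mp_lock assumes a module-level MULTIPROCESSING_LOCK shared by the workers. A sketch of how such a lock might be wired into a pool via an initializer; this is an assumption about usage, not eo-learn's actual setup, and some_task is a hypothetical work function:

import multiprocessing

MULTIPROCESSING_LOCK = None


def _init_worker(lock):
    # Pool initializer: stash the shared lock in each worker's globals.
    global MULTIPROCESSING_LOCK
    MULTIPROCESSING_LOCK = lock


def some_task(i):
    # Hypothetical work item that would call execute_with_mp_lock internally.
    return i


if __name__ == "__main__":
    lock = multiprocessing.Lock()
    with multiprocessing.Pool(initializer=_init_worker, initargs=(lock,)) as pool:
        pool.map(some_task, range(4))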
github steve71 / RasPiBrew / webpy / raspibrew_webpy.py
def heatProcGPIO(cycle_time, duty_cycle, conn):
    p = current_process()
    print('Starting:', p.name, p.pid)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(17, GPIO.OUT)
    while (True):
        while (conn.poll()): #get last
            cycle_time, duty_cycle = conn.recv()
        conn.send([cycle_time, duty_cycle])  
        if duty_cycle == 0:
            GPIO.output(17, False)
            time.sleep(cycle_time)
        elif duty_cycle == 100:
            GPIO.output(17, True)
            time.sleep(cycle_time)
        else:
            on_time, off_time = getonofftime(cycle_time, duty_cycle)
            GPIO.output(17, True)
            time.sleep(on_time)
            GPIO.output(17, False)
            time.sleep(off_time)
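
For context, a sketch of how such a heater worker might be launched and fed new settings from the parent; the Pipe wiring and the initial cycle/duty values are assumptions, not RasPiBrew's actual startup code:

import multiprocessing

if __name__ == "__main__":
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(name="heatProcGPIO", target=heatProcGPIO,
                                args=(2.0, 50, child_conn))
    p.start()
    parent_conn.send([2.0, 75])  # adjust cycle time / duty cycle on the fly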
github ethereum / trinity / trinity / db / beacon / manager.py
def create_db_server_manager(trinity_config: TrinityConfig,
                             base_db: AtomicDatabaseAPI) -> BaseManager:
    app_config = trinity_config.get_app_config(BeaconAppConfig)
    chain_config = app_config.get_chain_config()
    chaindb = BeaconChainDB(base_db, chain_config.genesis_config)

    if not is_beacon_database_initialized(chaindb, BeaconBlock):
        initialize_beacon_database(chain_config, chaindb, base_db, BeaconBlock)

    # This enables connections from clients launched in another process or shell
    multiprocessing.current_process().authkey = AUTH_KEY

    class DBManager(BaseManager):
        pass

    DBManager.register(
        'get_db', callable=lambda: TracebackRecorder(base_db), proxytype=AsyncDBProxy)

    DBManager.register(
        'get_chaindb',
        callable=lambda: TracebackRecorder(chaindb),
        proxytype=AsyncBeaconChainDBProxy,
    )

    manager = DBManager(address=str(trinity_config.database_ipc_path))  # type: ignore
    return manager
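
On the connecting side, the client must present the same authkey; BaseManager reads it from current_process().authkey when none is passed explicitly. A client-side sketch, with the address and key values left as assumptions:

import multiprocessing
from multiprocessing.managers import BaseManager

AUTH_KEY = b'...'  # must match the authkey set by the server process


class DBManager(BaseManager):
    pass


DBManager.register('get_db')

multiprocessing.current_process().authkey = AUTH_KEY
manager = DBManager(address='/path/to/database.ipc')
manager.connect()
db = manager.get_db()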
github mozilla / autophone / worker.py
def is_alive(self):
        """Call from main process."""
        assert multiprocessing.current_process().name == 'autophone'
        try:
            if self.options.verbose:
                self.loggerdeco.debug('is_alive: PhoneWorkerSubProcess.p %s, pid %s',
                                      self.p, self.p.pid if self.p else None)
            return self.p and self.p.is_alive()
        except Exception:
            self.loggerdeco.exception('is_alive: PhoneWorkerSubProcess.p %s, pid %s',
                                      self.p, self.p.pid if self.p else None)
        return False
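
The assert only holds because the main process renames itself at startup; Process.name is writable, so the guard presumably relies on something like this (an assumption about autophone's startup code):

import multiprocessing

# Done once in the main process so workers' guards can identify it by name.
multiprocessing.current_process().name = 'autophone'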
github rembo10 / headphones / headphones / music_encoder.py
def command_map(args):
    """
    Wrapper for the '[multiprocessing.]map()' method, to unpack the arguments
    and wrap exceptions.
    """

    # Initialize multiprocessing logger
    if multiprocessing.current_process().name != "MainProcess":
        logger.initMultiprocessing()

    # Start encoding
    try:
        return command(*args)
    except Exception:
        logger.exception("Encoder raised an exception.")
        return False
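
A sketch of the driver side that would hand argument tuples to command_map through a pool; the tuple contents here are illustrative:

import multiprocessing

if __name__ == "__main__":
    # Each tuple is unpacked into command(*args) by command_map.
    jobs = [("track1.flac",), ("track2.flac",)]
    with multiprocessing.Pool() as pool:
        results = pool.map(command_map, jobs)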
github modflowpy / flopy / examples / groundwater_paper / scripts / uspb_capture_par.py
def unpack_args(args):
    try:
        f, idx, nmax, k, i, j, Qt, base, hdry = args
    except:
        sys.stdout.write('could not unpack args\n')
        raise
    try:
        current = mp.current_process()
        # Within a pool, the undocumented _identity[0] is the worker's
        # 1-based index; subtract 1 to get a 0-based index.
        imod = current._identity[0] - 1
    except:
        sys.stdout.write('could not get current process\n')
        raise
    return f(imod, idx, nmax, k, i, j, Qt, base, hdry)
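
Because _identity is only populated on spawned worker processes (it is empty in MainProcess, hence the try/except), unpack_args is meant to run inside a pool. A sketch of such a driver, with an illustrative stand-in for the real capture function:

import multiprocessing as mp


def capture(imod, idx, nmax, k, i, j, Qt, base, hdry):
    # Stand-in for the real capture-fraction calculation.
    return imod, idx


if __name__ == "__main__":
    jobs = [(capture, idx, 10, 0, 1, 2, -100.0, 0.0, 1e30) for idx in range(4)]
    with mp.Pool(processes=2) as pool:
        results = pool.map(unpack_args, jobs)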
github caiostringari / pywavelearn / scripts / extract_timestack.py
     - rotate the matrix;
     - extract the red, green and blue pixel arrays;
     - save the data in netcdf4 format.

    ----------
    Args:
        num [Mandatory (int)]: Number of the process being called.

        frames [Mandatory (list)]: List of frames to process.

    ----------
    Returns:

    """

    name = mp.current_process().name

    # print ("Worker",num)

    print("    + Worker ", num, ' starting')

    N = len(frames)

    # Loop over the files in the chunk
    k = 0
    for frame in frames:
        percent = round(((k * 100) / N), 2)
        print(
            "      --> Processing frame {} of {} ({} %) [Worker {}]".format(
                k + 1, N, percent, num))
        k += 1
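
A sketch of how frames might be split across several such workers; the worker function name and the round-robin chunking are assumptions, not pywavelearn's actual driver:

import glob
import multiprocessing as mp

if __name__ == "__main__":
    frames = sorted(glob.glob("frames/*.png"))
    nproc = 4
    chunks = [frames[i::nproc] for i in range(nproc)]  # round-robin split
    procs = [mp.Process(target=worker, args=(i, chunk))
             for i, chunk in enumerate(chunks)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()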
github niftools / pyffi / pyffi / spells / __init__.py
def _log(cls, level, level_str, msg):
            # do not actually log, just print
            if level >= cls.level:
                print("pyffi.toaster:%i:%s:%s"
                      % (multiprocessing.current_process().pid,
                         level_str, msg))
github facebookresearch / pythia / pythia / tasks / processors.py
def _is_main_process(self):
        return multiprocessing.current_process().name == "Process-1"
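
Note that the stdlib names the parent process 'MainProcess' and numbers spawned children 'Process-1', 'Process-2', and so on, so the check above actually matches the first spawned worker rather than the interpreter's main process. For comparison, a minimal check for the true main process:

import multiprocessing


def is_main_process():
    # The parent interpreter's process is named 'MainProcess' unless renamed.
    return multiprocessing.current_process().name == "MainProcess"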