How to use loky - 10 common examples

To help you get started, we’ve selected ten loky examples based on popular ways the library is used in public projects.

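The project excerpts below are easier to follow with the core pattern in hand, so here is a minimal, self-contained sketch (ours, not taken from any of the projects): get_reusable_executor returns a process pool that implements the standard concurrent.futures.Executor interface.

from loky import get_reusable_executor

def square(x):
    return x * x

if __name__ == "__main__":
    # Spawns (or reuses) a pool of 4 worker processes.
    executor = get_reusable_executor(max_workers=4)
    futures = [executor.submit(square, i) for i in range(8)]
    print([f.result() for f in futures])  # [0, 1, 4, 9, 16, 25, 36, 49]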

Example 1: michaelnowotny/cocos (cocos/multi_processing/map_reduce.py)
        else:
            raise ValueError('Number_of_batches must be defined if '
                             'both args_list and kwargs_list are empty')

    if args_list is None:
        args_list = number_of_batches * [list()]
    if kwargs_list is None:
        kwargs_list = number_of_batches * [dict()]

    result = initial_value
    if multiprocessing_pool_type == MultiprocessingPoolType.LOKY:
        from concurrent.futures import as_completed
        from loky import get_reusable_executor

        executor = \
            get_reusable_executor(timeout=None,
                                  context='loky')

        futures = [executor.submit(f, *args, **kwargs)
                   for args, kwargs
                   in zip(args_list, kwargs_list)]

        result_from_future = lambda x: x.result()
    elif multiprocessing_pool_type == MultiprocessingPoolType.PATHOS:
        from pathos.pools import ProcessPool
        pool = ProcessPool()
        futures = [pool.apipe(f, *args, **kwargs)
                   for args, kwargs
                   in zip(args_list, kwargs_list)]

        result_from_future = lambda x: x.get()
    else:
        raise ValueError(f'Multiprocessing pool type {multiprocessing_pool_type} not supported')
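The loky branch fans out one submit per (args, kwargs) pair. A standalone sketch of the same pattern, with made-up argument lists:

from loky import get_reusable_executor

def scale(x, factor=1):
    return x * factor

if __name__ == "__main__":
    executor = get_reusable_executor(timeout=None)
    args_list = [(1,), (2,), (3,)]
    kwargs_list = [{"factor": 10}] * 3
    futures = [executor.submit(scale, *args, **kwargs)
               for args, kwargs in zip(args_list, kwargs_list)]
    print([future.result() for future in futures])  # [10, 20, 30]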
Example 2: anttttti/Wordbatch (wordbatch/batcher.py)
			results = [task(minibatch) for minibatch in paral_params]
		else:
			if backend=="multiprocessing":
				with closing(multiprocessing.Pool(max(1, procs), maxtasksperchild=2)) as pool:
					results = pool.map_async(task, paral_params)
					pool.close()
					pool.join()
					results= results.get()
			elif backend=="threading":
				with closing(multiprocessing.dummy.Pool(max(1,procs))) as pool:
					results= pool.map(task, paral_params)
					pool.close()
					pool.join()
			if backend=="loky":
				from loky import get_reusable_executor
				pool= get_reusable_executor(max_workers=max(1, procs))
				results= list(pool.map(task, paral_params))
			elif backend == "dask":
				###if not (input_split):  data= self.scatter(data)
				results = [self.backend_handle.submit(task, params) for params in paral_params]
			elif backend == "spark":
				def apply_func_to_indexedrdd(batch):
					return [batch[0]] + [task([batch[1]] + args)]
				results = paral_params.map(apply_func_to_indexedrdd)
			elif backend == "ray":
				import ray
				@ray.remote
				def f_ray(f, data):
					return f(data)
				results = [f_ray.remote(task, params) for params in paral_params]
				results = [self.backend_handle.get(x) for x in results] #Slower, but handles edge cases
				#results= self.backend_handle.get(results) #Faster, but crashes on edge cases?
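In the "loky" branch, executor.map behaves like multiprocessing's Pool.map: results come back in input order. A minimal sketch with illustrative data:

from loky import get_reusable_executor

def tokenize(text):
    return text.split()

if __name__ == "__main__":
    pool = get_reusable_executor(max_workers=2)
    minibatches = ["a b c", "d e", "f"]
    print(list(pool.map(tokenize, minibatches)))  # [['a', 'b', 'c'], ['d', 'e'], ['f']]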
Example 3: qucontrol/krotov (src/krotov/parallelization.py)
    if progress_bar is None:
        progress_bar = BaseProgressBar()
    if progress_bar is True:
        progress_bar = TextProgressBar()

    progress_bar.start(len(values))
    nfinished = [0]

    def _update_progress_bar(x):
        nfinished[0] += 1
        progress_bar.update(nfinished[0])

    if USE_LOKY:
        Executor = LokyReusableExecutor
        if USE_THREADPOOL_LIMITS:
            Executor = partial(
                LokyReusableExecutor,
                initializer=_process_threadpool_limits_initializier,
            )
    else:
        Executor = ProcessPoolExecutor

    _threadpool_limits = _no_threadpool_limits
    if USE_THREADPOOL_LIMITS:
        _threadpool_limits = threadpool_limits

    with _threadpool_limits(limits=1):
        with Executor(max_workers=num_cpus) as executor:
            jobs = []
            try:
                for value in values:
                    args = (value,) + tuple(task_args)
                    job = executor.submit(task, *args, **task_kwargs)
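krotov builds its Executor by binding an initializer with functools.partial. A sketch of the same pattern, assuming loky's ProcessPoolExecutor accepts initializer/initargs like the stdlib class (limit_threads below is an illustrative stand-in for _process_threadpool_limits_initializier):

import os
from functools import partial
from loky import ProcessPoolExecutor

def limit_threads(limit):
    # Illustrative stand-in: runs once in each fresh worker process,
    # before any task executes there.
    os.environ["OMP_NUM_THREADS"] = str(limit)

if __name__ == "__main__":
    Executor = partial(ProcessPoolExecutor, initializer=partial(limit_threads, 1))
    with Executor(max_workers=2) as executor:
        print(list(executor.map(abs, [-1, -2, -3])))  # [1, 2, 3]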
Example 4: michaelnowotny/cocos (cocos/multi_processing/device_pool.py)
        if exclude_intel_devices:
            compute_devices = \
                filter(lambda x: 'intel' not in x.name.lower(),
                       [compute_device
                        for compute_device
                        in self._compute_devices])
            self._compute_devices = frozenset(compute_devices)

        # ctx = multiprocessing.get_context("spawn")
        # self._executor = ProcessPoolExecutor(max_workers=self._n_gpus,
        #                                      mp_context=ctx)

        if multiprocessing_pool_type == MultiprocessingPoolType.LOKY:
            from loky import get_reusable_executor, wait

            self._executor = get_reusable_executor(max_workers=self.number_of_devices,
                                                   timeout=None,
                                                   context='loky')

            futures = [self._executor.submit(_init_gpu_in_process,
                                             device_id=compute_device.id)
                       for compute_device
                       in self._compute_devices]

            wait(futures)

            [future.result() for future in futures]
        elif multiprocessing_pool_type == MultiprocessingPoolType.PATHOS:
            from pathos.pools import ProcessPool

            self._executor = ProcessPool(nodes=self.number_of_devices)
            futures = [self._executor.apipe(_init_gpu_in_process, device_id=compute_device.id)
                       for compute_device
                       in self._compute_devices]
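The loky branch relies on loky re-exporting wait from concurrent.futures: block until every per-device initializer has run, then call .result() to surface any exception raised in a worker. A sketch with a dummy initializer:

from loky import get_reusable_executor, wait

def fake_init(device_id):
    # Illustrative stand-in for _init_gpu_in_process.
    return device_id

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2, timeout=None)
    futures = [executor.submit(fake_init, device_id=i) for i in range(2)]
    wait(futures)
    print([f.result() for f in futures])  # [0, 1]; .result() re-raises worker errors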
Example 5: michaelnowotny/cocos (cocos/multi_processing/device_pool.py)
            sync()
            result = f(*args, **kwargs)
            if device_to_host_transfer_function is not None:
                result = device_to_host_transfer_function(result)
            sync()
            return result

        result = initial_value
        if self.multiprocessing_pool_type == MultiprocessingPoolType.LOKY:
            from loky import as_completed

            futures = [self._executor.submit(synced_f, *args, **kwargs)
                       for i, (args, kwargs)
                       in enumerate(zip(args_list, kwargs_list))]

            for future in as_completed(futures):
                result = reduction(result, future.result())
                # result = reduce_with_none(result, future.result(), reduction)
        elif self.multiprocessing_pool_type == MultiprocessingPoolType.PATHOS:
            futures = [self._executor.apipe(synced_f, *args, **kwargs)
                       for args, kwargs
                       in zip(args_list, kwargs_list)]

            for future in futures:
                result = reduction(result, future.get())
                # result = reduce_with_none(result, future.get(), reduction)
        else:
            raise ValueError(f'Multiprocessing pool type {self.multiprocessing_pool_type} not supported')

        return result
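The loky branch reduces results in completion order rather than submission order, so one slow task never blocks the reduction of faster ones. A standalone sketch, with operator.add standing in for the reduction callable:

import operator
from loky import as_completed, get_reusable_executor

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2)
    futures = [executor.submit(pow, i, 2) for i in range(4)]
    result = 0
    for future in as_completed(futures):
        result = operator.add(result, future.result())
    print(result)  # 0 + 1 + 4 + 9 == 14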
Example 6: michaelnowotny/cocos (cocos/multi_processing/device_pool.py)
        # self._executor = ProcessPoolExecutor(max_workers=self._n_gpus,
        #                                      mp_context=ctx)

        if multiprocessing_pool_type == MultiprocessingPoolType.LOKY:
            from loky import get_reusable_executor, wait

            self._executor = get_reusable_executor(max_workers=self.number_of_devices,
                                                   timeout=None,
                                                   context='loky')

            futures = [self._executor.submit(_init_gpu_in_process,
                                             device_id=compute_device.id)
                       for compute_device
                       in self._compute_devices]

            wait(futures)

            [future.result() for future in futures]
        elif multiprocessing_pool_type == MultiprocessingPoolType.PATHOS:
            from pathos.pools import ProcessPool

            self._executor = ProcessPool(nodes=self.number_of_devices)
            futures = [self._executor.apipe(_init_gpu_in_process, device_id=compute_device.id)
                       for compute_device
                       in self._compute_devices]

            for future in futures:
                while not future.ready():
                    pass
        else:
            raise ValueError(f'Multiprocessing pool type {multiprocessing_pool_type} not supported')
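Calling get_reusable_executor from constructor-style code like this is cheap by design: repeated calls with the same arguments are expected to hand back the same pool rather than respawning workers. A quick check:

from loky import get_reusable_executor

if __name__ == "__main__":
    first = get_reusable_executor(max_workers=2)
    second = get_reusable_executor(max_workers=2)
    print(first is second)  # expected True: the worker pool is reused, not respawned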
Example 7: qucontrol/krotov (src/krotov/parallelization.py)
    num_cpus = multiprocessing.cpu_count()

    if progress_bar is None:
        progress_bar = BaseProgressBar()
    if progress_bar is True:
        progress_bar = TextProgressBar()

    progress_bar.start(len(values))
    nfinished = [0]

    def _update_progress_bar(x):
        nfinished[0] += 1
        progress_bar.update(nfinished[0])

    if USE_LOKY:
        Executor = LokyReusableExecutor
        if USE_THREADPOOL_LIMITS:
            Executor = partial(
                LokyReusableExecutor,
                initializer=_process_threadpool_limits_initializier,
            )
    else:
        Executor = ProcessPoolExecutor

    _threadpool_limits = _no_threadpool_limits
    if USE_THREADPOOL_LIMITS:
        _threadpool_limits = threadpool_limits

    with _threadpool_limits(limits=1):
        with Executor(max_workers=num_cpus) as executor:
            jobs = []
            try:
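The _threadpool_limits context manager comes from the threadpoolctl package and caps BLAS/OpenMP threads in the parent while the worker processes run in parallel; krotov additionally passes an initializer (above) so the limit also applies inside each worker. A sketch assuming threadpoolctl is installed:

from loky import get_reusable_executor
from threadpoolctl import threadpool_limits

def work(x):
    return x + 1

if __name__ == "__main__":
    with threadpool_limits(limits=1):  # limit thread pools in this process
        executor = get_reusable_executor(max_workers=4)
        print(list(executor.map(work, range(4))))  # [1, 2, 3, 4]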
Example 8: mapillary/OpenSfM (opensfm/context.py)
def parallel_map(func, args, num_proc):
    """Run function for all arguments using multiple processes."""
    num_proc = min(num_proc, len(args))
    if num_proc <= 1:
        return list(map(func, args))
    else:
        with get_reusable_executor(max_workers=num_proc, timeout=None) as e:
            return list(e.map(func, args))
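A possible call of this helper, with illustrative values; it falls back to a plain map for a single process and otherwise farms the work out to loky workers:

import math

if __name__ == "__main__":
    # Relies on parallel_map as defined above, plus the module-level
    # `from loky import get_reusable_executor` import in opensfm/context.py.
    print(parallel_map(math.sqrt, [1, 4, 9], num_proc=2))  # [1.0, 2.0, 3.0]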
Example 9: qucontrol/krotov (src/krotov/parallelization.py)
        This function should only be called once per script/notebook, at its
        very beginning. The :obj:`USE_LOKY` and :obj:`USE_THREADPOOL_LIMITS`
        variables may be set at any time.
    """
    global USE_LOKY
    start_methods = ['fork', 'spawn', 'forkserver']
    if use_loky:
        start_methods.extend(['loky', 'loky_int_main'])
    if start_method is not None:
        if start_method not in start_methods:
            raise ValueError("start_method not in %s" % str(start_methods))
    if use_loky:
        if not _HAS_LOKY:
            raise ImportError("The loky library is not installed.")
        USE_LOKY = True
        loky.backend.context.set_start_method(start_method)
        if loky_pickler is not None:
            loky.set_loky_pickler(loky_pickler)
    else:
        multiprocessing.set_start_method(start_method)
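A sketch of those configuration calls at the top of a script; the 'loky' start method and the set_loky_pickler entry point both appear in the snippet above, while the "cloudpickle" argument here is an illustrative choice:

import loky

if __name__ == "__main__":
    # Call once, at the very beginning of the script (see docstring above).
    loky.backend.context.set_start_method("loky")
    loky.set_loky_pickler("cloudpickle")  # pickler module name; illustrative choice
    executor = loky.get_reusable_executor(max_workers=2)
    print(list(executor.map(str, range(3))))  # ['0', '1', '2']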
Example 10: qucontrol/krotov (src/krotov/parallelization.py)
def _parallel_map_fw_prop_step_loky(shared, values, task_args):
    """Loky-based implementation of :func:`parallel_map_fw_prop_step`."""
    tlist = task_args[4]
    pulses = task_args[2]
    time_index = task_args[5]
    n = len(values)
    if time_index == 0:
        # we only send the full task_args through IPC once, for the first time
        # step. Subsequent time steps will reuse the data
        shared.executors = [
            LokyProcessPoolExecutor(
                max_workers=1,
                initializer=partial(
                    _pmfw_initializer, limit_thread_pool=USE_THREADPOOL_LIMITS
                ),
                initargs=(
                    state_index,
                    task_args[0][state_index],  # initial_state
                    task_args[1][state_index],  # objective
                    task_args[2],  # pulses
                    task_args[3],  # pulses_mapping
                    task_args[4],  # tlist
                    task_args[6][state_index],  # propagator
                ),
            )
            for state_index in range(n)
        ]
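
The one-executor-per-state layout ships the heavy task_args through initargs exactly once; every later submit to that worker runs next to the already-initialized data. A minimal sketch of worker-resident state (names illustrative; os.environ is used for the state because it is per-process):

from loky import ProcessPoolExecutor

def _init(state_index):
    # Runs once in the single worker; stores state for later calls.
    import os
    os.environ["STATE_INDEX"] = str(state_index)

def step(t):
    import os
    return (os.environ["STATE_INDEX"], t)

if __name__ == "__main__":
    executor = ProcessPoolExecutor(max_workers=1, initializer=_init, initargs=(7,))
    print(executor.submit(step, 1).result())  # ('7', 1)
    executor.shutdown()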

loky

A robust implementation of concurrent.futures.ProcessPoolExecutor

License: BSD-3-Clause